author    Linus Torvalds <torvalds@linux-foundation.org> 2017-02-23 18:58:18 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2017-02-23 18:58:18 -0800
commit    ef96152e6a36e0510387cb174178b7982c1ae879 (patch)
tree      f2b881feb97893dd6e73380fe206bbfd5110559e
parent    Merge tag 'usercopy-v4.11-rc1.fix' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux (diff)
parent    lib/Kconfig: make PRIME_NUMBERS not user selectable. (diff)
Merge tag 'drm-for-v4.11-less-shouty' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for v4.11.

  Nothing too major, the tinydrm and mmu-less support should make
  writing smaller drivers easier for some of the simpler platforms, and
  there are a bunch of documentation updates.

  Intel grew displayport MST audio support which is hopefully useful to
  people, and FBC is on by default for GEN9+ (so people know where to
  look for regressions). AMDGPU has a lot of fixes that would like new
  firmware files installed for some GPUs.

  Other than that it's pretty scattered all over. I may have a follow
  up pull request as I know BenH has a bunch of AST rework and fixes
  and I'd like to get those in once they've been tested by AST, and
  I've got at least one pull request I'm just trying to get the author
  to fix up.

  Core:
   - drm_mm reworked
   - Connector list locking and iterators
   - Documentation updates
   - Format handling rework
   - MMU-less support for fbdev helpers
   - drm_crtc_from_index helper
   - Core CRC API
   - Remove drm_framebuffer_unregister_private
   - Debugfs cleanup
   - EDID/Infoframe fixes
   - Release callback
   - Tinydrm support (smaller drivers for simple hw)

  panel:
   - Add support for some new simple panels

  i915:
   - FBC by default for gen9+
   - Shared dpll cleanups and docs
   - GEN8 powerdomain cleanup
   - DMC support on GLK
   - DP MST audio support
   - HuC loading support
   - GVT init ordering fixes
   - GVT IOMMU workaround fix

  amdgpu/radeon:
   - Power/clockgating improvements
   - Preliminary SR-IOV support
   - TTM buffer priority and eviction fixes
   - SI DPM quirks removed due to firmware fixes
   - Powerplay improvements
   - VCE/UVD powergating fixes
   - Cleanup SI GFX code to match CI/VI
   - Support for > 2 displays on 3/5 crtc asics
   - SI headless fixes

  nouveau:
   - Rework secure boot code in prep for GP10x secure boot
   - Channel recovery improvements
   - Initial power budget code
   - MMU rework preparation

  vmwgfx:
   - Bunch of fixes and cleanups

  exynos:
   - Runtime PM support for MIC driver
   - Cleanups to use atomic helpers
   - UHD Support for TM2/TM2E boards
   - Trigger mode fix for Rinato board

  etnaviv:
   - Shader performance fix
   - Command stream validator fixes
   - Command buffer suballocator

  rockchip:
   - CDN DisplayPort support
   - IOMMU support for arm64 platform

  imx-drm:
   - Fix i.MX5 TV encoder probing
   - Remove lower fb size limits

  msm:
   - Support for HW cursor on MDP5 devices
   - DSI encoder cleanup
   - GPU DT bindings cleanup

  sti:
   - stih410 cleanups
   - Create fbdev at binding
   - HQVDP fixes
   - Remove stih416 chip functionality
   - DVI/HDMI mode selection fixes
   - FPS statistic reporting

  omapdrm:
   - IRQ code cleanup

  dw-hdmi bridge:
   - Cleanups and fixes

  adv-bridge:
   - Updates for nexus

  sii8620 bridge:
   - Add interlace mode support
   - Rework HDMI and lots of fixes

  qxl:
   - probing/teardown cleanups

  ZTE drm:
   - HDMI audio via SPDIF interface
   - Video Layer overlay plane support
   - Add TV encoder output device

  atmel-hlcdc:
   - Rework fbdev creation logic

  tegra:
   - OF node fix

  fsl-dcu:
   - Minor fixes

  mali-dp:
   - Assorted fixes

  sunxi:
   - Minor fix"

[ This was the "fixed" pull, that still had build warnings due to
  people not even having build tested the result. I'm not a happy
  camper. I've fixed the things I noticed up in this merge.

     - Linus ]

* tag 'drm-for-v4.11-less-shouty' of git://people.freedesktop.org/~airlied/linux: (1177 commits)
  lib/Kconfig: make PRIME_NUMBERS not user selectable
  drm/tinydrm: helpers: Properly fix backlight dependency
  drm/tinydrm: mipi-dbi: Fix field width specifier warning
  drm/tinydrm: mipi-dbi: Silence: ‘cmd’ may be used uninitialized
  drm/sti: fix build warnings in sti_drv.c and sti_vtg.c files
  drm/amd/powerplay: fix PSI feature on Polars12
  drm/amdgpu: refuse to reserve io mem for split VRAM buffers
  drm/ttm: fix use-after-free races in vm fault handling
  drm/tinydrm: Add support for Multi-Inno MI0283QT display
  dt-bindings: Add Multi-Inno MI0283QT binding
  dt-bindings: display/panel: Add common rotation property
  of: Add vendor prefix for Multi-Inno
  drm/tinydrm: Add MIPI DBI support
  drm/tinydrm: Add helper functions
  drm: Add DRM support for tiny LCD displays
  drm/amd/amdgpu: post card if there is real hw resetting performed
  drm/nouveau/tmr: provide backtrace when a timeout is hit
  drm/nouveau/pci/g92: Fix rearm
  drm/nouveau/drm/therm/fan: add a fallback if no fan control is specified in the vbios
  drm/nouveau/hwmon: expose power_max and power_crit
  ...
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt35
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt12
-rw-r--r--Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt85
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt46
-rw-r--r--Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt2
-rw-r--r--Documentation/devicetree/bindings/display/imx/hdmi.txt51
-rw-r--r--Documentation/devicetree/bindings/display/msm/gpu.txt38
-rw-r--r--Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt27
-rw-r--r--Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel.txt4
-rw-r--r--Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt7
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt43
-rw-r--r--Documentation/devicetree/bindings/display/zte,vou.txt15
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt3
-rw-r--r--Documentation/dma-buf-sharing.txt482
-rw-r--r--Documentation/driver-api/dma-buf.rst92
-rw-r--r--Documentation/gpu/drm-kms.rst8
-rw-r--r--Documentation/gpu/drm-mm.rst61
-rw-r--r--Documentation/gpu/drm-uapi.rst25
-rw-r--r--Documentation/gpu/i915.rst103
-rw-r--r--Documentation/gpu/index.rst1
-rw-r--r--Documentation/gpu/introduction.rst15
-rw-r--r--Documentation/gpu/tinydrm.rst42
-rw-r--r--Documentation/sound/hd-audio/dp-mst.rst17
-rw-r--r--MAINTAINERS9
-rw-r--r--arch/blackfin/include/asm/vga.h1
-rw-r--r--drivers/char/agp/intel-gtt.c6
-rw-r--r--drivers/dma-buf/dma-buf.c207
-rw-r--r--drivers/dma-buf/dma-fence.c27
-rw-r--r--drivers/dma-buf/sync_debug.c17
-rw-r--r--drivers/dma-buf/sync_file.c21
-rw-r--r--drivers/gpu/drm/Kconfig36
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c229
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c114
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c221
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_si.h (renamed from drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h)0
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c2320
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.h239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_smc.c995
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_smumgr.h94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c227
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c898
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c592
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c1072
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_enums.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sid.h (renamed from drivers/gpu/drm/amd/include/asic_reg/si/sid.h)0
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c133
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c451
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi_dpm.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c4
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h9
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h9
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h105
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h3
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c741
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c86
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c99
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c14
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c175
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c30
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c241
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c378
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c28
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h20
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h21
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_debug.h10
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_instance.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h20
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c181
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c44
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c32
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c36
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c43
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c32
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c3
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c3
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c5
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c18
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c9
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c76
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h2
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c41
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h13
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c102
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h8
-rw-r--r--drivers/gpu/drm/armada/Kconfig2
-rw-r--r--drivers/gpu/drm/armada/Makefile2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c9
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c6
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c6
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c2
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c7
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c4
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h3
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c7
-rw-r--r--drivers/gpu/drm/ast/ast_main.c5
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c16
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c17
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c4
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c22
-rw-r--r--drivers/gpu/drm/bochs/Kconfig2
-rw-r--r--drivers/gpu/drm/bochs/bochs.h1
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c13
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c5
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c153
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c9
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c1
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.c441
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.h85
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c949
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.h50
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c8
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c7
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c9
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c2
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c141
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c362
-rw-r--r--drivers/gpu/drm/drm_auth.c12
-rw-r--r--drivers/gpu/drm/drm_blend.c11
-rw-r--r--drivers/gpu/drm/drm_bridge.c88
-rw-r--r--drivers/gpu/drm/drm_cache.c27
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c28
-rw-r--r--drivers/gpu/drm/drm_connector.c228
-rw-r--r--drivers/gpu/drm/drm_crtc.c83
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c85
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h11
-rw-r--r--drivers/gpu/drm/drm_debugfs.c30
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c34
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c98
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c80
-rw-r--r--drivers/gpu/drm/drm_encoder.c23
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c2
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c135
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c96
-rw-r--r--drivers/gpu/drm/drm_fops.c15
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c71
-rw-r--r--drivers/gpu/drm/drm_gem.c24
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c93
-rw-r--r--drivers/gpu/drm/drm_global.c23
-rw-r--r--drivers/gpu/drm/drm_internal.h9
-rw-r--r--drivers/gpu/drm/drm_ioctl.c34
-rw-r--r--drivers/gpu/drm/drm_irq.c75
-rw-r--r--drivers/gpu/drm/drm_legacy.h7
-rw-r--r--drivers/gpu/drm/drm_mm.c1023
-rw-r--r--drivers/gpu/drm/drm_mode_config.c150
-rw-r--r--drivers/gpu/drm/drm_mode_object.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c28
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c25
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c10
-rw-r--r--drivers/gpu/drm/drm_of.c1
-rw-r--r--drivers/gpu/drm/drm_panel.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c8
-rw-r--r--drivers/gpu/drm/drm_plane.c20
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c17
-rw-r--r--drivers/gpu/drm/drm_platform.c6
-rw-r--r--drivers/gpu/drm/drm_prime.c19
-rw-r--r--drivers/gpu/drm/drm_print.c6
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c85
-rw-r--r--drivers/gpu/drm/drm_property.c6
-rw-r--r--drivers/gpu/drm/drm_rect.c6
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c23
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c3
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig3
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c14
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c153
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h58
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c24
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c95
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h28
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c76
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h10
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c100
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c34
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c118
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c34
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c126
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c80
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c14
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c13
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_tcon.c12
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c8
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c13
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c17
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c13
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c9
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c6
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c7
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c4
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c5
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c27
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c15
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h5
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c24
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c1
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h1
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug15
-rw-r--r--drivers/gpu/drm/i915/Makefile11
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c43
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c25
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c31
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c47
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c70
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c57
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c108
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h12
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c14
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c174
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1150
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c218
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h925
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c525
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c297
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h277
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c172
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c100
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c617
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h95
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c49
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h23
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c113
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c146
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c347
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c116
-rw-r--r--drivers/gpu/drm/i915/i915_guc_reg.h13
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c959
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c169
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.c752
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.h38
-rw-r--r--drivers/gpu/drm/i915/i915_params.c6
-rw-r--r--drivers/gpu/drm/i915/i915_params.h2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c183
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2096
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h488
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c16
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c141
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h6
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c6
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h34
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h64
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c33
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c266
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h59
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c31
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c51
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c39
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c27
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c10
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c7
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c16
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c70
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c55
-rw-r--r--drivers/gpu/drm/i915/intel_display.c827
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c366
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c31
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c130
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c351
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h178
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h111
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c46
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c38
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c18
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c9
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c30
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c20
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c19
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h79
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c255
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c658
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c256
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c198
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c4
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c338
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c22
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c259
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h11
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c99
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c7
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c13
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c289
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c32
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c1011
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c843
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c209
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c155
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h66
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c163
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c21
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c131
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c116
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h (renamed from drivers/gpu/drm/i915/intel_guc.h)108
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c14
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h12
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c14
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c7
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c8
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c18
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c40
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c6
-rw-r--r--drivers/gpu/drm/lib/drm_random.c41
-rw-r--r--drivers/gpu/drm/lib/drm_random.h25
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c11
-rw-r--r--drivers/gpu/drm/meson/Makefile6
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c4
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c2
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c24
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c37
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c100
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c2
-rw-r--r--drivers/gpu/drm/msm/Kconfig8
-rw-r--r--drivers/gpu/drm/msm/Makefile2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c62
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c18
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h51
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h269
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c25
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c97
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c257
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c239
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h20
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c169
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h11
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c1104
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c28
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h48
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c135
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c73
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c77
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c123
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h45
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c183
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h1
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c26
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c6
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c20
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h12
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c12
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c13
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c7
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h3
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h9
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c6
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c60
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c80
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h42
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c16
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl906f.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h30
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h76
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c358
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c60
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c177
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c110
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c104
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c170
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/mm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c266
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c307
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c266
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c126
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c936
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h250
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c254
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c1391
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c126
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c158
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h199
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c14
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c148
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c15
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c232
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h52
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c176
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c13
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c242
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c24
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c102
-rw-r--r--drivers/gpu/drm/qxl/Kconfig2
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c16
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c35
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c57
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h11
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c16
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c70
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c18
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h8
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c19
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c19
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c64
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c10
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c47
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c4
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig10
-rw-r--r--drivers/gpu/drm/rockchip/Makefile2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c1260
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h112
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c979
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.h483
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c118
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c244
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c39
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c4
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h2
-rw-r--r--drivers/gpu/drm/selftests/Makefile1
-rw-r--r--drivers/gpu/drm/selftests/drm_mm_selftests.h24
-rw-r--r--drivers/gpu/drm/selftests/drm_selftest.c109
-rw-r--r--drivers/gpu/drm/selftests/drm_selftest.h41
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c2276
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.h1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c4
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c4
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c6
-rw-r--r--drivers/gpu/drm/sti/Makefile1
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c46
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c162
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h7
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c13
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c95
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c14
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c257
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h17
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c31
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c17
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c8
-rw-r--r--drivers/gpu/drm/sti/sti_vtac.c223
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c63
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c13
-rw-r--r--drivers/gpu/drm/tegra/dc.c8
-rw-r--r--drivers/gpu/drm/tegra/drm.c46
-rw-r--r--drivers/gpu/drm/tegra/drm.h1
-rw-r--r--drivers/gpu/drm/tegra/fb.c15
-rw-r--r--drivers/gpu/drm/tegra/gem.c4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c19
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c4
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig21
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile7
-rw-r--r--drivers/gpu/drm/tinydrm/core/Makefile3
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c376
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c460
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c234
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c279
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c1005
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c129
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c12
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c9
-rw-r--r--drivers/gpu/drm/udl/udl_main.c3
-rw-r--r--drivers/gpu/drm/vc4/Kconfig2
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c52
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h9
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c1725
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h5
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h1
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c4
-rw-r--r--drivers/gpu/drm/via/via_drv.h2
-rw-r--r--drivers/gpu/drm/via/via_map.c4
-rw-r--r--drivers/gpu/drm/via/via_mm.c4
-rw-r--r--drivers/gpu/drm/virtio/Kconfig2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c2
-rw-r--r--drivers/gpu/drm/zte/Kconfig2
-rw-r--r--drivers/gpu/drm/zte/Makefile1
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c3
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.h1
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c160
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi_regs.h14
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c332
-rw-r--r--drivers/gpu/drm/zte/zx_plane.h12
-rw-r--r--drivers/gpu/drm/zte/zx_plane_regs.h51
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c407
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc_regs.h31
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c371
-rw-r--r--drivers/gpu/drm/zte/zx_vou.h48
-rw-r--r--drivers/gpu/drm/zte/zx_vou_regs.h51
-rw-r--r--drivers/gpu/host1x/bus.c1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c1
-rw-r--r--drivers/video/fbdev/Kconfig8
-rw-r--r--drivers/video/fbdev/core/fbmem.c18
-rw-r--r--include/drm/bridge/dw_hdmi.h19
-rw-r--r--include/drm/bridge/mhl.h85
-rw-r--r--include/drm/drmP.h34
-rw-r--r--include/drm/drm_atomic.h37
-rw-r--r--include/drm/drm_atomic_helper.h22
-rw-r--r--include/drm/drm_auth.h17
-rw-r--r--include/drm/drm_bridge.h46
-rw-r--r--include/drm/drm_cache.h4
-rw-r--r--include/drm/drm_color_mgmt.h27
-rw-r--r--include/drm/drm_connector.h107
-rw-r--r--include/drm/drm_crtc.h98
-rw-r--r--include/drm/drm_crtc_helper.h1
-rw-r--r--include/drm/drm_dp_helper.h13
-rw-r--r--include/drm/drm_dp_mst_helper.h14
-rw-r--r--include/drm/drm_drv.h79
-rw-r--r--include/drm/drm_edid.h10
-rw-r--r--include/drm/drm_encoder.h9
-rw-r--r--include/drm/drm_encoder_slave.h1
-rw-r--r--include/drm/drm_fb_cma_helper.h12
-rw-r--r--include/drm/drm_fb_helper.h10
-rw-r--r--include/drm/drm_flip_work.h2
-rw-r--r--include/drm/drm_framebuffer.h46
-rw-r--r--include/drm/drm_gem.h16
-rw-r--r--include/drm/drm_gem_cma_helper.h17
-rw-r--r--include/drm/drm_irq.h8
-rw-r--r--include/drm/drm_mm.h443
-rw-r--r--include/drm/drm_mode_config.h30
-rw-r--r--include/drm/drm_mode_object.h13
-rw-r--r--include/drm/drm_modes.h2
-rw-r--r--include/drm/drm_modeset_helper.h3
-rw-r--r--include/drm/drm_modeset_helper_vtables.h147
-rw-r--r--include/drm/drm_modeset_lock.h2
-rw-r--r--include/drm/drm_panel.h4
-rw-r--r--include/drm/drm_plane.h28
-rw-r--r--include/drm/drm_print.h24
-rw-r--r--include/drm/drm_property.h8
-rw-r--r--include/drm/drm_simple_kms_helper.h18
-rw-r--r--include/drm/i915_pciids.h21
-rw-r--r--include/drm/intel-gtt.h6
-rw-r--r--include/drm/tinydrm/ili9341.h54
-rw-r--r--include/drm/tinydrm/mipi-dbi.h107
-rw-r--r--include/drm/tinydrm/tinydrm-helpers.h81
-rw-r--r--include/drm/tinydrm/tinydrm.h115
-rw-r--r--include/drm/ttm/ttm_bo_api.h2
-rw-r--r--include/drm/ttm/ttm_bo_driver.h31
-rw-r--r--include/linux/dma-buf.h224
-rw-r--r--include/linux/dma-fence.h52
-rw-r--r--include/linux/prime_numbers.h37
-rw-r--r--include/linux/reservation.h34
-rw-r--r--include/uapi/drm/Kbuild1
-rw-r--r--include/uapi/drm/amdgpu_drm.h9
-rw-r--r--include/uapi/drm/drm_fourcc.h59
-rw-r--r--include/uapi/drm/i915_drm.h136
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/video/exynos5433_decon.h2
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Makefile2
-rw-r--r--lib/prime_numbers.c315
-rw-r--r--sound/pci/hda/hda_codec.c76
-rw-r--r--sound/pci/hda/hda_codec.h3
-rw-r--r--sound/pci/hda/patch_hdmi.c245
-rwxr-xr-xtools/testing/selftests/drivers/gpu/drm_mm.sh15
-rwxr-xr-xtools/testing/selftests/lib/prime_numbers.sh15
923 files changed, 46348 insertions, 22490 deletions
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
index e2768703ac2b..34c7fddcea39 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
@@ -56,6 +56,18 @@ Required properties for V3D:
- interrupts: The interrupt number
See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+Required properties for DSI:
+- compatible: Should be "brcm,bcm2835-dsi0" or "brcm,bcm2835-dsi1"
+- reg: Physical base address and length of the DSI block's registers
+- interrupts: The interrupt number
+ See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+- clocks: a) phy: The DSI PLL clock feeding the DSI analog PHY
+ b) escape: The DSI ESC clock from CPRMAN
+ c) pixel: The DSI pixel clock from CPRMAN
+- clock-output-names:
+ The 3 clocks output from the DSI analog PHY: dsi[01]_byte,
+ dsi[01]_ddr2, and dsi[01]_ddr
+
[1] Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
@@ -99,6 +111,29 @@ dpi: dpi@7e208000 {
};
};
+dsi1: dsi@7e700000 {
+ compatible = "brcm,bcm2835-dsi1";
+ reg = <0x7e700000 0x8c>;
+ interrupts = <2 12>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+
+ clocks = <&clocks BCM2835_PLLD_DSI1>,
+ <&clocks BCM2835_CLOCK_DSI1E>,
+ <&clocks BCM2835_CLOCK_DSI1P>;
+ clock-names = "phy", "escape", "pixel";
+
+ clock-output-names = "dsi1_byte", "dsi1_ddr2", "dsi1_ddr";
+
+ pitouchscreen: panel@0 {
+ compatible = "raspberrypi,touchscreen";
+ reg = <0>;
+
+ <...>
+ };
+};
+
vec: vec@7e806000 {
compatible = "brcm,bcm2835-vec";
reg = <0x7e806000 0x1000>;
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 6532a59c9b43..00ea670b8c4d 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -38,10 +38,22 @@ The following input format properties are required except in "rgb 1x" and
- adi,input-justification: The input bit justification ("left", "evenly",
"right").
+- avdd-supply: A 1.8V supply that powers up the AVDD pin on the chip.
+- dvdd-supply: A 1.8V supply that powers up the DVDD pin on the chip.
+- pvdd-supply: A 1.8V supply that powers up the PVDD pin on the chip.
+- dvdd-3v-supply: A 3.3V supply that powers up the pin called DVDD_3V
+ on the chip.
+- bgvdd-supply: A 1.8V supply that powers up the BGVDD pin. This is
+ needed only for ADV7511.
+
The following properties are required for ADV7533:
- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
be one of 1, 2, 3 or 4.
+- a2vdd-supply: 1.8V supply that powers up the A2VDD pin on the chip.
+- v3p3-supply: A 3.3V supply that powers up the V3P3 pin on the chip.
+- v1p2-supply: A supply that powers up the V1P2 pin on the chip. It can be
+ either 1.2V or 1.8V.
Optional properties:
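For illustration, a minimal sketch of a board-level node wiring up the new
supply properties. The I2C address and the &reg_1v8/&reg_3v3 regulator
phandles are assumptions for the example, not part of the binding:

	hdmi-bridge@39 {
		compatible = "adi,adv7511";
		reg = <0x39>;	/* example I2C address, board-specific */

		/* all 1.8V rails fed from one assumed regulator here */
		avdd-supply = <&reg_1v8>;
		dvdd-supply = <&reg_1v8>;
		pvdd-supply = <&reg_1v8>;
		bgvdd-supply = <&reg_1v8>;
		dvdd-3v-supply = <&reg_3v3>;
	};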
diff --git a/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt b/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
index 5e9a84d6e5f1..33bf981fbe33 100644
--- a/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt
@@ -1,52 +1,33 @@
-DesignWare HDMI bridge bindings
-
-Required properties:
-- compatible: platform specific such as:
- * "snps,dw-hdmi-tx"
- * "fsl,imx6q-hdmi"
- * "fsl,imx6dl-hdmi"
- * "rockchip,rk3288-dw-hdmi"
-- reg: Physical base address and length of the controller's registers.
-- interrupts: The HDMI interrupt number
-- clocks, clock-names : must have the phandles to the HDMI iahb and isfr clocks,
- as described in Documentation/devicetree/bindings/clock/clock-bindings.txt,
- the clocks are soc specific, the clock-names should be "iahb", "isfr"
--port@[X]: SoC specific port nodes with endpoint definitions as defined
- in Documentation/devicetree/bindings/media/video-interfaces.txt,
- please refer to the SoC specific binding document:
- * Documentation/devicetree/bindings/display/imx/hdmi.txt
- * Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
-
-Optional properties
-- reg-io-width: the width of the reg:1,4, default set to 1 if not present
-- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing,
- if the property is omitted, a functionally reduced I2C bus
- controller on DW HDMI is probed
-- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
-
-Example:
- hdmi: hdmi@0120000 {
- compatible = "fsl,imx6q-hdmi";
- reg = <0x00120000 0x9000>;
- interrupts = <0 115 0x04>;
- gpr = <&gpr>;
- clocks = <&clks 123>, <&clks 124>;
- clock-names = "iahb", "isfr";
- ddc-i2c-bus = <&i2c2>;
-
- port@0 {
- reg = <0>;
-
- hdmi_mux_0: endpoint {
- remote-endpoint = <&ipu1_di0_hdmi>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- hdmi_mux_1: endpoint {
- remote-endpoint = <&ipu1_di1_hdmi>;
- };
- };
- };
+Synopsys DesignWare HDMI TX Encoder
+===================================
+
+This document defines device tree properties for the Synopsys DesignWare HDMI
+TX Encoder (DWC HDMI TX). It doesn't constitute a device tree binding
+specification by itself but is meant to be referenced by platform-specific
+device tree bindings.
+
+When referenced from platform device tree bindings, the properties defined in
+this document are interpreted as follows. The platform device tree bindings
+are responsible for defining whether each property is required or optional.
+
+- reg: Memory mapped base address and length of the DWC HDMI TX registers.
+
+- reg-io-width: Width of the registers specified by the reg property. The
+ value is expressed in bytes and must be equal to 1 or 4 if specified. The
+ register width defaults to 1 if the property is not present.
+
+- interrupts: Reference to the DWC HDMI TX interrupt.
+
+- clocks: References to all the clocks specified in the clock-names property
+ as specified in Documentation/devicetree/bindings/clock/clock-bindings.txt.
+
+- clock-names: The DWC HDMI TX uses the following clocks.
+
+ - "iahb" is the bus clock for either AHB and APB (mandatory).
+ - "isfr" is the internal register configuration clock (mandatory).
+ - "cec" is the HDMI CEC controller main clock (optional).
+
+- ports: The connectivity of the DWC HDMI TX with the rest of the system is
+ expressed using ports as specified in the device graph bindings defined
+ in Documentation/devicetree/bindings/graph.txt. The numbering of the ports
+ is platform-specific.
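+
+Example (illustrative only; as noted above this document is not a standalone
+binding, so the compatible string, clock specifiers, interrupt and port
+numbering below are hypothetical and are normally dictated by the
+platform-specific binding):
+
+	hdmi: hdmi@ff980000 {
+		compatible = "vendor,soc-dw-hdmi";
+		reg = <0xff980000 0x20000>;
+		reg-io-width = <4>;
+		interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_SFR>;
+		clock-names = "iahb", "isfr";
+
+		ports {
+			port@0 {
+				reg = <0>;
+
+				hdmi_in: endpoint {
+					remote-endpoint = <&vop_out_hdmi>;
+				};
+			};
+		};
+	};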
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
new file mode 100644
index 000000000000..6ec1a880ac18
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
@@ -0,0 +1,46 @@
+THS8135 Video DAC
+-----------------
+
+This is the binding for Texas Instruments THS8135 Video DAC bridge.
+
+Required properties:
+
+- compatible: Must be "ti,ths8135"
+
+Required nodes:
+
+This device has two video ports. Their connections are modelled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for RGB input
+- Video port 1 for VGA output
+
+Example
+-------
+
+vga-bridge {
+ compatible = "ti,ths8135";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ vga_bridge_in: endpoint {
+ remote-endpoint = <&lcdc_out_vga>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ vga_bridge_out: endpoint {
+ remote-endpoint = <&vga_con_in>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt
index 38dc9d60eef8..305a0e72a900 100644
--- a/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt
+++ b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt
@@ -16,7 +16,7 @@ Required properties:
"clk_ade_core" for the ADE core clock.
"clk_codec_jpeg" for the media NOC QoS clock, which use the same clock with
jpeg codec.
- "clk_ade_pix" for the ADE pixel clok.
+ "clk_ade_pix" for the ADE pixel clock.
- assigned-clocks: Should contain "clk_ade_core" and "clk_codec_jpeg" clocks'
phandle + clock-specifier pairs.
- assigned-clock-rates: clock rates, one for each entry in assigned-clocks.
diff --git a/Documentation/devicetree/bindings/display/imx/hdmi.txt b/Documentation/devicetree/bindings/display/imx/hdmi.txt
index 1b756cf9afb0..66a8f86e5d12 100644
--- a/Documentation/devicetree/bindings/display/imx/hdmi.txt
+++ b/Documentation/devicetree/bindings/display/imx/hdmi.txt
@@ -1,29 +1,36 @@
-Device-Tree bindings for HDMI Transmitter
+Freescale i.MX6 DWC HDMI TX Encoder
+===================================
-HDMI Transmitter
-================
+The HDMI transmitter is a Synopsys DesignWare HDMI 1.4 TX controller IP
+with a companion PHY IP.
+
+These DT bindings follow the Synopsys DWC HDMI TX bindings defined in
+Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt with the
+following device-specific properties.
-The HDMI Transmitter is a Synopsys DesignWare HDMI 1.4 TX controller IP
-with accompanying PHY IP.
Required properties:
- - #address-cells : should be <1>
- - #size-cells : should be <0>
- - compatible : should be "fsl,imx6q-hdmi" or "fsl,imx6dl-hdmi".
- - gpr : should be <&gpr>.
- The phandle points to the iomuxc-gpr region containing the HDMI
- multiplexer control register.
- - clocks, clock-names : phandles to the HDMI iahb and isrf clocks, as described
- in Documentation/devicetree/bindings/clock/clock-bindings.txt and
- Documentation/devicetree/bindings/clock/imx6q-clock.txt.
- - port@[0-4]: Up to four port nodes with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt,
- corresponding to the four inputs to the HDMI multiplexer.
-
-Optional properties:
- - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
-
-example:
+
+- compatible : Shall be one of "fsl,imx6q-hdmi" or "fsl,imx6dl-hdmi".
+- reg: See dw_hdmi.txt.
+- interrupts: HDMI interrupt number
+- clocks: See dw_hdmi.txt.
+- clock-names: Shall contain "iahb" and "isfr" as defined in dw_hdmi.txt.
+- ports: See dw_hdmi.txt. The DWC HDMI shall have between one and four ports,
+ numbered 0 to 3, corresponding to the four inputs of the HDMI multiplexer.
+ Each port shall have a single endpoint.
+- gpr : Shall contain a phandle to the iomuxc-gpr region containing the HDMI
+ multiplexer control register.
+
+Optional properties
+
+- ddc-i2c-bus: The HDMI DDC bus can be connected to either a system I2C master
+ or the functionally-reduced I2C master contained in the DWC HDMI. When
+ connected to a system I2C master this property contains a phandle to that
+ I2C master controller.
+
+
+Example:
gpr: iomuxc-gpr@020e0000 {
/* ... */
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index 67d0a58dbb77..43fac0fe09bb 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -1,23 +1,19 @@
Qualcomm adreno/snapdragon GPU
Required properties:
-- compatible: "qcom,adreno-3xx"
+- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
+ for example: "qcom,adreno-306.0", "qcom,adreno"
+ Note that you need to list the less specific "qcom,adreno" (since this
+ is what the device is matched on) in addition to the more specific
+ entry with the chip-id.
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the gpu.
- clocks: device clocks
See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required:
- * "core_clk"
- * "iface_clk"
- * "mem_iface_clk"
-- qcom,chipid: gpu chip-id. Note this may become optional for future
- devices if we can reliably read the chipid from hw
-- qcom,gpu-pwrlevels: list of operating points
- - compatible: "qcom,gpu-pwrlevels"
- - for each qcom,gpu-pwrlevel:
- - qcom,gpu-freq: requested gpu clock speed
- - NOTE: downstream android driver defines additional parameters to
- configure memory bandwidth scaling per OPP.
+ * "core"
+ * "iface"
+ * "mem_iface"
Example:
@@ -25,28 +21,18 @@ Example:
...
gpu: qcom,kgsl-3d0@4300000 {
- compatible = "qcom,adreno-3xx";
+ compatible = "qcom,adreno-320.2", "qcom,adreno";
reg = <0x04300000 0x20000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <GIC_SPI 80 0>;
interrupt-names = "kgsl_3d0_irq";
clock-names =
- "core_clk",
- "iface_clk",
- "mem_iface_clk";
+ "core",
+ "iface",
+ "mem_iface";
clocks =
<&mmcc GFX3D_CLK>,
<&mmcc GFX3D_AHB_CLK>,
<&mmcc MMSS_IMEM_AHB_CLK>;
- qcom,chipid = <0x03020100>;
- qcom,gpu-pwrlevels {
- compatible = "qcom,gpu-pwrlevels";
- qcom,gpu-pwrlevel@0 {
- qcom,gpu-freq = <450000000>;
- };
- qcom,gpu-pwrlevel@1 {
- qcom,gpu-freq = <27000000>;
- };
- };
};
};
diff --git a/Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt b/Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
new file mode 100644
index 000000000000..eed48c3d4875
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
@@ -0,0 +1,27 @@
+Multi-Inno MI0283QT display panel
+
+Required properties:
+- compatible: "multi-inno,mi0283qt".
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- dc-gpios: D/C pin. The presence/absence of this GPIO determines
+ the panel interface mode (IM[3:0] pins):
+ - present: IM=x110 4-wire 8-bit data serial interface
+ - absent: IM=x101 3-wire 9-bit data serial interface
+- reset-gpios: Reset pin
+- power-supply: A regulator node for the supply voltage.
+- backlight: phandle of the backlight device attached to the panel
+- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
+
+Example:
+ mi0283qt@0 {
+ compatible = "multi-inno,mi0283qt";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ rotation = <90>;
+ dc-gpios = <&gpio 25 0>;
+ backlight = <&backlight>;
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt b/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt
new file mode 100644
index 000000000000..b258d6a91ec6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt
@@ -0,0 +1,7 @@
+BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "boe,nv101wxmn51"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
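+
+An illustrative sketch of a panel node (the phandles are hypothetical, and the
+optional properties come from simple-panel.txt):
+
+	panel {
+		compatible = "boe,nv101wxmn51";
+		power-supply = <&panel_regulator>;
+		backlight = <&backlight>;
+	};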
diff --git a/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt b/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt
new file mode 100644
index 000000000000..c6d06b5eab51
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt
@@ -0,0 +1,7 @@
+Netron-DY E231732 7.0" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "netron-dy,e231732"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panel.txt b/Documentation/devicetree/bindings/display/panel/panel.txt
new file mode 100644
index 000000000000..e2e6867852b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel.txt
@@ -0,0 +1,4 @@
+Common display properties
+-------------------------
+
+- rotation: Display rotation in degrees counter clockwise (0,90,180,270)
diff --git a/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt b/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt
new file mode 100644
index 000000000000..eb9501a82e25
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt
@@ -0,0 +1,7 @@
+Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "tianma,tm070jdhg30"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
index 668091f27674..046076c6b277 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
@@ -1,24 +1,39 @@
-Rockchip specific extensions to the Synopsys Designware HDMI
-================================
+Rockchip DWC HDMI TX Encoder
+============================
+
+The HDMI transmitter is a Synopsys DesignWare HDMI 1.4 TX controller IP
+with a companion PHY IP.
+
+These DT bindings follow the Synopsys DWC HDMI TX bindings defined in
+Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt with the
+following device-specific properties.
+
Required properties:
-- compatible: "rockchip,rk3288-dw-hdmi";
-- reg: Physical base address and length of the controller's registers.
-- clocks: phandle to hdmi iahb and isfr clocks.
-- clock-names: should be "iahb" "isfr"
-- rockchip,grf: this soc should set GRF regs to mux vopl/vopb.
+
+- compatible: Shall contain "rockchip,rk3288-dw-hdmi".
+- reg: See dw_hdmi.txt.
+- reg-io-width: See dw_hdmi.txt. Shall be 4.
- interrupts: HDMI interrupt number
-- ports: contain a port node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt. For
- vopb,set the reg = <0> and set the reg = <1> for vopl.
-- reg-io-width: the width of the reg:1,4, the value should be 4 on
- rk3288 platform
+- clocks: See dw_hdmi.txt.
+- clock-names: Shall contain "iahb" and "isfr" as defined in dw_hdmi.txt.
+- ports: See dw_hdmi.txt. The DWC HDMI shall have a single port numbered 0
+ corresponding to the video input of the controller. The port shall have two
+ endpoints, numbered 0 and 1, connected respectively to the vopb and vopl.
+- rockchip,grf: Shall reference the GRF to mux vopl/vopb.
Optional properties
-- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
-- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
+
+- ddc-i2c-bus: The HDMI DDC bus can be connected to either a system I2C master
+ or the functionally-reduced I2C master contained in the DWC HDMI. When
+ connected to a system I2C master this property contains a phandle to that
+ I2C master controller.
+- clock-names: May contain "cec" as defined in dw_hdmi.txt; the "cec" clock
+  is optional.
+
Example:
+
hdmi: hdmi@ff980000 {
compatible = "rockchip,rk3288-dw-hdmi";
reg = <0xff980000 0x20000>;
diff --git a/Documentation/devicetree/bindings/display/zte,vou.txt b/Documentation/devicetree/bindings/display/zte,vou.txt
index 740e5bd2e4f7..9c356284232b 100644
--- a/Documentation/devicetree/bindings/display/zte,vou.txt
+++ b/Documentation/devicetree/bindings/display/zte,vou.txt
@@ -49,6 +49,15 @@ Required properties:
"osc_clk"
"xclk"
+* TV Encoder output device
+
+Required properties:
+ - compatible: should be "zte,zx296718-tvenc"
+ - reg: Physical base address and length of the TVENC device IO region
 - zte,tvenc-power-control: the phandle to the SYSCTRL block followed by two
+   integer cells. The first cell is the offset of the SYSCTRL register used
+   to control TV Encoder DAC power, and the second cell is the bit mask.
+
Example:
vou: vou@1440000 {
@@ -81,4 +90,10 @@ vou: vou@1440000 {
<&topcrm HDMI_XCLK>;
clock-names = "osc_cec", "osc_clk", "xclk";
};
+
+ tvenc: tvenc@2000 {
+ compatible = "zte,zx296718-tvenc";
+ reg = <0x2000 0x1000>;
+ zte,tvenc-power-control = <&sysctrl 0x170 0x10>;
+ };
};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index f6824fd8fb65..bd0ed3cb4994 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -195,6 +195,7 @@ mpl MPL AG
mqmaker mqmaker Inc.
msi Micro-Star International Co. Ltd.
mti Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
+multi-inno Multi-Inno Technology Co.,Ltd
mundoreader Mundo Reader S.L.
murata Murata Manufacturing Co., Ltd.
mxicy Macronix International Co., Ltd.
@@ -204,6 +205,7 @@ nec NEC LCD Technologies, Ltd.
neonode Neonode Inc.
netgear NETGEAR
netlogic Broadcom Corporation (formerly NetLogic Microsystems)
+netron-dy Netron DY
netxeon Shenzhen Netxeon Technology CO., LTD
nexbox Nexbox
newhaven Newhaven Display International
@@ -305,6 +307,7 @@ technologic Technologic Systems
terasic Terasic Inc.
thine THine Electronics, Inc.
ti Texas Instruments
+tianma Tianma Micro-electronics Co., Ltd.
tlm Trusted Logic Mobility
topeet Topeet
toradex Toradex AG
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
deleted file mode 100644
index ca44c5820585..000000000000
--- a/Documentation/dma-buf-sharing.txt
+++ /dev/null
@@ -1,482 +0,0 @@
- DMA Buffer Sharing API Guide
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Sumit Semwal
- <sumit dot semwal at linaro dot org>
- <sumit dot semwal at ti dot com>
-
-This document serves as a guide to device-driver writers on what is the dma-buf
-buffer sharing API, how to use it for exporting and using shared buffers.
-
-Any device driver which wishes to be a part of DMA buffer sharing, can do so as
-either the 'exporter' of buffers, or the 'user' of buffers.
-
-Say a driver A wants to use buffers created by driver B, then we call B as the
-exporter, and A as buffer-user.
-
-The exporter
-- implements and manages operations[1] for the buffer
-- allows other users to share the buffer by using dma_buf sharing APIs,
-- manages the details of buffer allocation,
-- decides about the actual backing storage where this allocation happens,
-- takes care of any migration of scatterlist - for all (shared) users of this
- buffer,
-
-The buffer-user
-- is one of (many) sharing users of the buffer.
-- doesn't need to worry about how the buffer is allocated, or where.
-- needs a mechanism to get access to the scatterlist that makes up this buffer
- in memory, mapped into its own address space, so it can access the same area
- of memory.
-
-dma-buf operations for device dma only
---------------------------------------
-
-The dma_buf buffer sharing API usage contains the following steps:
-
-1. Exporter announces that it wishes to export a buffer
-2. Userspace gets the file descriptor associated with the exported buffer, and
- passes it around to potential buffer-users based on use case
-3. Each buffer-user 'connects' itself to the buffer
-4. When needed, buffer-user requests access to the buffer from exporter
-5. When finished with its use, the buffer-user notifies end-of-DMA to exporter
-6. when buffer-user is done using this buffer completely, it 'disconnects'
- itself from the buffer.
-
-
-1. Exporter's announcement of buffer export
-
- The buffer exporter announces its wish to export a buffer. In this, it
- connects its own private buffer data, provides implementation for operations
- that can be performed on the exported dma_buf, and flags for the file
- associated with this buffer. All these fields are filled in struct
- dma_buf_export_info, defined via the DEFINE_DMA_BUF_EXPORT_INFO macro.
-
- Interface:
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info)
- struct dma_buf *dma_buf_export(struct dma_buf_export_info *exp_info)
-
- If this succeeds, dma_buf_export allocates a dma_buf structure, and
- returns a pointer to the same. It also associates an anonymous file with this
- buffer, so it can be exported. On failure to allocate the dma_buf object,
- it returns NULL.
-
- 'exp_name' in struct dma_buf_export_info is the name of exporter - to
- facilitate information while debugging. It is set to KBUILD_MODNAME by
- default, so exporters don't have to provide a specific name, if they don't
- wish to.
-
- DEFINE_DMA_BUF_EXPORT_INFO macro defines the struct dma_buf_export_info,
- zeroes it out and pre-populates exp_name in it.
-
-
-2. Userspace gets a handle to pass around to potential buffer-users
-
- Userspace entity requests for a file-descriptor (fd) which is a handle to the
- anonymous file associated with the buffer. It can then share the fd with other
- drivers and/or processes.
-
- Interface:
- int dma_buf_fd(struct dma_buf *dmabuf, int flags)
-
- This API installs an fd for the anonymous file associated with this buffer;
- returns either 'fd', or error.
-
-3. Each buffer-user 'connects' itself to the buffer
-
- Each buffer-user now gets a reference to the buffer, using the fd passed to
- it.
-
- Interface:
- struct dma_buf *dma_buf_get(int fd)
-
- This API will return a reference to the dma_buf, and increment refcount for
- it.
-
- After this, the buffer-user needs to attach its device with the buffer, which
- helps the exporter to know of device buffer constraints.
-
- Interface:
- struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev)
-
- This API returns reference to an attachment structure, which is then used
- for scatterlist operations. It will optionally call the 'attach' dma_buf
- operation, if provided by the exporter.
-
- The dma-buf sharing framework does the bookkeeping bits related to managing
- the list of all attachments to a buffer.
-
-Until this stage, the buffer-exporter has the option to choose not to actually
-allocate the backing storage for this buffer, but wait for the first buffer-user
-to request use of buffer for allocation.
-
-
-4. When needed, buffer-user requests access to the buffer
-
- Whenever a buffer-user wants to use the buffer for any DMA, it asks for
- access to the buffer using dma_buf_map_attachment API. At least one attach to
- the buffer must have happened before map_dma_buf can be called.
-
- Interface:
- struct sg_table * dma_buf_map_attachment(struct dma_buf_attachment *,
- enum dma_data_direction);
-
- This is a wrapper to dma_buf->ops->map_dma_buf operation, which hides the
- "dma_buf->ops->" indirection from the users of this interface.
-
- In struct dma_buf_ops, map_dma_buf is defined as
- struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
- enum dma_data_direction);
-
- It is one of the buffer operations that must be implemented by the exporter.
- It should return the sg_table containing scatterlist for this buffer, mapped
- into caller's address space.
-
- If this is being called for the first time, the exporter can now choose to
- scan through the list of attachments for this buffer, collate the requirements
- of the attached devices, and choose an appropriate backing storage for the
- buffer.
-
- Based on enum dma_data_direction, it might be possible to have multiple users
- accessing at the same time (for reading, maybe), or any other kind of sharing
- that the exporter might wish to make available to buffer-users.
-
- map_dma_buf() operation can return -EINTR if it is interrupted by a signal.
-
-
-5. When finished, the buffer-user notifies end-of-DMA to exporter
-
- Once the DMA for the current buffer-user is over, it signals 'end-of-DMA' to
- the exporter using the dma_buf_unmap_attachment API.
-
- Interface:
- void dma_buf_unmap_attachment(struct dma_buf_attachment *,
- struct sg_table *);
-
- This is a wrapper to dma_buf->ops->unmap_dma_buf() operation, which hides the
- "dma_buf->ops->" indirection from the users of this interface.
-
- In struct dma_buf_ops, unmap_dma_buf is defined as
- void (*unmap_dma_buf)(struct dma_buf_attachment *,
- struct sg_table *,
- enum dma_data_direction);
-
- unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like
- map_dma_buf, this API also must be implemented by the exporter.
-
-
-6. when buffer-user is done using this buffer, it 'disconnects' itself from the
- buffer.
-
- After the buffer-user has no more interest in using this buffer, it should
- disconnect itself from the buffer:
-
- - it first detaches itself from the buffer.
-
- Interface:
- void dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *dmabuf_attach);
-
- This API removes the attachment from the list in dmabuf, and optionally calls
- dma_buf->ops->detach(), if provided by exporter, for any housekeeping bits.
-
- - Then, the buffer-user returns the buffer reference to exporter.
-
- Interface:
- void dma_buf_put(struct dma_buf *dmabuf);
-
- This API then reduces the refcount for this buffer.
-
- If, as a result of this call, the refcount becomes 0, the 'release' file
- operation related to this fd is called. It calls the dmabuf->ops->release()
- operation in turn, and frees the memory allocated for dmabuf when exported.
-
-NOTES:
-- Importance of attach-detach and {map,unmap}_dma_buf operation pairs
- The attach-detach calls allow the exporter to figure out backing-storage
- constraints for the currently-interested devices. This allows preferential
- allocation, and/or migration of pages across different types of storage
- available, if possible.
-
- Bracketing of DMA access with {map,unmap}_dma_buf operations is essential
- to allow just-in-time backing of storage, and migration mid-way through a
- use-case.
-
-- Migration of backing storage if needed
- If after
- - at least one map_dma_buf has happened,
- - and the backing storage has been allocated for this buffer,
- another new buffer-user intends to attach itself to this buffer, it might
- be allowed, if possible for the exporter.
-
- In case it is allowed by the exporter:
- if the new buffer-user has stricter 'backing-storage constraints', and the
- exporter can handle these constraints, the exporter can just stall on the
- map_dma_buf until all outstanding access is completed (as signalled by
- unmap_dma_buf).
- Once all users have finished accessing and have unmapped this buffer, the
- exporter could potentially move the buffer to the stricter backing-storage,
- and then allow further {map,unmap}_dma_buf operations from any buffer-user
- from the migrated backing-storage.
-
- If the exporter cannot fulfill the backing-storage constraints of the new
- buffer-user device as requested, dma_buf_attach() would return an error to
- denote non-compatibility of the new buffer-sharing request with the current
- buffer.
-
- If the exporter chooses not to allow an attach() operation once a
- map_dma_buf() API has been called, it simply returns an error.
-
-Kernel cpu access to a dma-buf buffer object
---------------------------------------------
-
-The motivation to allow cpu access from the kernel to a dma-buf object from the
-importers side are:
-- fallback operations, e.g. if the devices is connected to a usb bus and the
- kernel needs to shuffle the data around first before sending it away.
-- full transparency for existing users on the importer side, i.e. userspace
- should not notice the difference between a normal object from that subsystem
- and an imported one backed by a dma-buf. This is really important for drm
- opengl drivers that expect to still use all the existing upload/download
- paths.
-
-Access to a dma_buf from the kernel context involves three steps:
-
-1. Prepare access, which invalidate any necessary caches and make the object
- available for cpu access.
-2. Access the object page-by-page with the dma_buf map apis
-3. Finish access, which will flush any necessary cpu caches and free reserved
- resources.
-
-1. Prepare access
-
- Before an importer can access a dma_buf object with the cpu from the kernel
- context, it needs to notify the exporter of the access that is about to
- happen.
-
- Interface:
- int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
-
- This allows the exporter to ensure that the memory is actually available for
- cpu access - the exporter might need to allocate or swap-in and pin the
- backing storage. The exporter also needs to ensure that cpu access is
- coherent for the access direction. The direction can be used by the exporter
- to optimize the cache flushing, i.e. access with a different direction (read
- instead of write) might return stale or even bogus data (e.g. when the
- exporter needs to copy the data to temporary storage).
-
- This step might fail, e.g. in oom conditions.
-
-2. Accessing the buffer
-
- To support dma_buf objects residing in highmem cpu access is page-based using
- an api similar to kmap. Accessing a dma_buf is done in aligned chunks of
- PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which returns
- a pointer in kernel virtual address space. Afterwards the chunk needs to be
- unmapped again. There is no limit on how often a given chunk can be mapped
- and unmapped, i.e. the importer does not need to call begin_cpu_access again
- before mapping the same chunk again.
-
- Interfaces:
- void *dma_buf_kmap(struct dma_buf *, unsigned long);
- void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
-
- There are also atomic variants of these interfaces. Like for kmap they
- facilitate non-blocking fast-paths. Neither the importer nor the exporter (in
- the callback) is allowed to block when using these.
-
- Interfaces:
- void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
- void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
-
- For importers all the restrictions of using kmap apply, like the limited
- supply of kmap_atomic slots. Hence an importer shall only hold onto at most 2
- atomic dma_buf kmaps at the same time (in any given process context).
-
- dma_buf kmap calls outside of the range specified in begin_cpu_access are
- undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
- the partial chunks at the beginning and end but may return stale or bogus
- data outside of the range (in these partial chunks).
-
- Note that these calls need to always succeed. The exporter needs to complete
- any preparations that might fail in begin_cpu_access.
-
- For some cases the overhead of kmap can be too high, a vmap interface
- is introduced. This interface should be used very carefully, as vmalloc
- space is a limited resources on many architectures.
-
- Interfaces:
- void *dma_buf_vmap(struct dma_buf *dmabuf)
- void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
-
- The vmap call can fail if there is no vmap support in the exporter, or if it
- runs out of vmalloc space. Fallback to kmap should be implemented. Note that
- the dma-buf layer keeps a reference count for all vmap access and calls down
- into the exporter's vmap function only when no vmapping exists, and only
- unmaps it once. Protection against concurrent vmap/vunmap calls is provided
- by taking the dma_buf->lock mutex.
-
-3. Finish access
-
- When the importer is done accessing the CPU, it needs to announce this to
- the exporter (to facilitate cache flushing and unpinning of any pinned
- resources). The result of any dma_buf kmap calls after end_cpu_access is
- undefined.
-
- Interface:
- void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
- enum dma_data_direction dir);
-
-
-Direct Userspace Access/mmap Support
-------------------------------------
-
-Being able to mmap an export dma-buf buffer object has 2 main use-cases:
-- CPU fallback processing in a pipeline and
-- supporting existing mmap interfaces in importers.
-
-1. CPU fallback processing in a pipeline
-
- In many processing pipelines it is sometimes required that the cpu can access
- the data in a dma-buf (e.g. for thumbnail creation, snapshots, ...). To avoid
- the need to handle this specially in userspace frameworks for buffer sharing
- it's ideal if the dma_buf fd itself can be used to access the backing storage
- from userspace using mmap.
-
- Furthermore Android's ION framework already supports this (and is otherwise
- rather similar to dma-buf from a userspace consumer side with using fds as
- handles, too). So it's beneficial to support this in a similar fashion on
- dma-buf to have a good transition path for existing Android userspace.
-
- No special interfaces, userspace simply calls mmap on the dma-buf fd, making
- sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always*
- used when the access happens. Note that DMA_BUF_IOCTL_SYNC can fail with
- -EAGAIN or -EINTR, in which case it must be restarted.
-
- Some systems might need some sort of cache coherency management e.g. when
- CPU and GPU domains are being accessed through dma-buf at the same time. To
- circumvent this problem there are begin/end coherency markers, that forward
- directly to existing dma-buf device drivers vfunc hooks. Userspace can make
- use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The sequence
- would be used like following:
- - mmap dma-buf fd
- - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
- to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
- want (with the new data being consumed by the GPU or say scanout device)
- - munmap once you don't need the buffer any more
-
- For correctness and optimal performance, it is always required to use
- SYNC_START and SYNC_END before and after, respectively, when accessing the
- mapped address. Userspace cannot rely on coherent access, even when there
- are systems where it just works without calling these ioctls.
-
-2. Supporting existing mmap interfaces in importers
-
- Similar to the motivation for kernel cpu access it is again important that
- the userspace code of a given importing subsystem can use the same interfaces
- with a imported dma-buf buffer object as with a native buffer object. This is
- especially important for drm where the userspace part of contemporary OpenGL,
- X, and other drivers is huge, and reworking them to use a different way to
- mmap a buffer rather invasive.
-
- The assumption in the current dma-buf interfaces is that redirecting the
- initial mmap is all that's needed. A survey of some of the existing
- subsystems shows that no driver seems to do any nefarious thing like syncing
- up with outstanding asynchronous processing on the device or allocating
- special resources at fault time. So hopefully this is good enough, since
- adding interfaces to intercept pagefaults and allow pte shootdowns would
- increase the complexity quite a bit.
-
- Interface:
- int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
- unsigned long);
-
- If the importing subsystem simply provides a special-purpose mmap call to set
- up a mapping in userspace, calling do_mmap with dma_buf->file will equally
- achieve that for a dma-buf object.
-
-3. Implementation notes for exporters
-
- Because dma-buf buffers have invariant size over their lifetime, the dma-buf
- core checks whether a vma is too large and rejects such mappings. The
- exporter hence does not need to duplicate this check.
-
- Because existing importing subsystems might presume coherent mappings for
- userspace, the exporter needs to set up a coherent mapping. If that's not
- possible, it needs to fake coherency by manually shooting down ptes when
- leaving the cpu domain and flushing caches at fault time. Note that all the
- dma_buf files share the same anon inode, hence the exporter needs to replace
- the dma_buf file stored in vma->vm_file with it's own if pte shootdown is
- required. This is because the kernel uses the underlying inode's address_space
- for vma tracking (and hence pte tracking at shootdown time with
- unmap_mapping_range).
-
- If the above shootdown dance turns out to be too expensive in certain
- scenarios, we can extend dma-buf with a more explicit cache tracking scheme
- for userspace mappings. But the current assumption is that using mmap is
- always a slower path, so some inefficiencies should be acceptable.
-
- Exporters that shoot down mappings (for any reasons) shall not do any
- synchronization at fault time with outstanding device operations.
- Synchronization is an orthogonal issue to sharing the backing storage of a
- buffer and hence should not be handled by dma-buf itself. This is explicitly
- mentioned here because many people seem to want something like this, but if
- different exporters handle this differently, buffer sharing can fail in
- interesting ways depending upong the exporter (if userspace starts depending
- upon this implicit synchronization).
-
-Other Interfaces Exposed to Userspace on the dma-buf FD
-------------------------------------------------------
-
-- Since kernel 3.12 the dma-buf FD supports the llseek system call, but only
- with offset=0 and whence=SEEK_END|SEEK_SET. SEEK_SET is supported to allow
- the usual size discover pattern size = SEEK_END(0); SEEK_SET(0). Every other
- llseek operation will report -EINVAL.
-
- If llseek on dma-buf FDs isn't support the kernel will report -ESPIPE for all
- cases. Userspace can use this to detect support for discovering the dma-buf
- size using llseek.
-
-Miscellaneous notes
--------------------
-
-- Any exporters or users of the dma-buf buffer sharing framework must have
- a 'select DMA_SHARED_BUFFER' in their respective Kconfigs.
-
-- In order to avoid fd leaks on exec, the FD_CLOEXEC flag must be set
- on the file descriptor. This is not just a resource leak, but a
- potential security hole. It could give the newly exec'd application
- access to buffers, via the leaked fd, to which it should otherwise
- not be permitted access.
-
- The problem with doing this via a separate fcntl() call, versus doing it
- atomically when the fd is created, is that this is inherently racy in a
- multi-threaded app[3]. The issue is made worse when it is library code
- opening/creating the file descriptor, as the application may not even be
- aware of the fd's.
-
- To avoid this problem, userspace must have a way to request O_CLOEXEC
- flag be set when the dma-buf fd is created. So any API provided by
- the exporting driver to create a dmabuf fd must provide a way to let
- userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
-
-- If an exporter needs to manually flush caches and hence needs to fake
- coherency for mmap support, it needs to be able to zap all the ptes pointing
- at the backing storage. Now linux mm needs a struct address_space associated
- with the struct file stored in vma->vm_file to do that with the function
- unmap_mapping_range. But the dma_buf framework only backs every dma_buf fd
- with the anon_file struct file, i.e. all dma_bufs share the same file.
-
- Hence exporters need to setup their own file (and address_space) association
- by setting vma->vm_file and adjusting vma->vm_pgoff in the dma_buf mmap
- callback. In the specific case of a gem driver the exporter could use the
- shmem file already provided by gem (and set vm_pgoff = 0). Exporters can then
- zap ptes by unmapping the corresponding range of the struct address_space
- associated with their own file.
-
-References:
-[1] struct dma_buf_ops in include/linux/dma-buf.h
-[2] All interfaces mentioned above defined in include/linux/dma-buf.h
-[3] https://lwn.net/Articles/236486/
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index a9b457a4b949..31671b469627 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -17,6 +17,98 @@ shared or exclusive fence(s) associated with the buffer.
Shared DMA Buffers
------------------
+This document serves as a guide for device-driver writers on what the dma-buf
+buffer sharing API is and how to use it for exporting and using shared buffers.
+
+Any device driver which wishes to be a part of DMA buffer sharing can do so as
+either the 'exporter' of buffers, or the 'user' or 'importer' of buffers.
+
+Say a driver A wants to use buffers created by driver B; then we call B the
+exporter, and A the buffer-user/importer.
+
+The exporter
+
+ - implements and manages operations in :c:type:`struct dma_buf_ops
+ <dma_buf_ops>` for the buffer,
+ - allows other users to share the buffer by using dma_buf sharing APIs,
+ - manages the details of buffer allocation, wrapped in a :c:type:`struct
+ dma_buf <dma_buf>`,
+ - decides about the actual backing storage where this allocation happens,
+ - and takes care of any migration of scatterlist - for all (shared) users of
+ this buffer.
+
+The buffer-user
+
+ - is one of (many) sharing users of the buffer.
+ - doesn't need to worry about how the buffer is allocated, or where.
+ - and needs a mechanism to get access to the scatterlist that makes up this
+ buffer in memory, mapped into its own address space, so it can access the
+ same area of memory. This interface is provided by :c:type:`struct
+ dma_buf_attachment <dma_buf_attachment>`.
+
+Any exporters or users of the dma-buf buffer sharing framework must have a
+'select DMA_SHARED_BUFFER' in their respective Kconfigs.
+
+Userspace Interface Notes
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For the most part a DMA buffer file descriptor is simply an opaque object for
+userspace, and hence the generic interface exposed is very minimal. There are
+a few things to consider though:
+
+- Since kernel 3.12 the dma-buf FD supports the llseek system call, but only
+ with offset=0 and whence=SEEK_END|SEEK_SET. SEEK_SET is supported to allow
+ the usual size discover pattern size = SEEK_END(0); SEEK_SET(0). Every other
+ llseek operation will report -EINVAL.
+
+ If llseek on dma-buf FDs isn't supported the kernel will report -ESPIPE for
+ all cases. Userspace can use this to detect support for discovering the
+ dma-buf size using llseek; see the sketch at the end of this section.
+
+- In order to avoid fd leaks on exec, the FD_CLOEXEC flag must be set
+ on the file descriptor. This is not just a resource leak, but a
+ potential security hole. It could give the newly exec'd application
+ access to buffers, via the leaked fd, to which it should otherwise
+ not be permitted access.
+
+ The problem with doing this via a separate fcntl() call, versus doing it
+ atomically when the fd is created, is that this is inherently racy in a
+ multi-threaded app (see https://lwn.net/Articles/236486/). The issue is made
+ worse when it is library code opening/creating the file descriptor, as the
+ application may not even be aware of the fds.
+
+ To avoid this problem, userspace must have a way to request O_CLOEXEC
+ flag be set when the dma-buf fd is created. So any API provided by
+ the exporting driver to create a dmabuf fd must provide a way to let
+ userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
+
+- Memory mapping the contents of the DMA buffer is also supported. See the
+ discussion below on `CPU Access to DMA Buffer Objects`_ for the full details.
+
+- The DMA buffer FD is also pollable, see `Fence Poll Support`_ below for
+ details.
+
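+As a minimal sketch of the llseek size-discovery pattern described above
+(purely illustrative userspace code, not part of any kernel API):
+
+.. code-block:: c
+
+    #include <sys/types.h>
+    #include <unistd.h>
+
+    /* Returns the dma-buf size, or -1 if llseek is not supported (ESPIPE). */
+    static off_t dma_buf_size(int fd)
+    {
+            off_t size = lseek(fd, 0, SEEK_END);
+
+            if (size < 0)
+                    return -1;
+            lseek(fd, 0, SEEK_SET);        /* restore the expected offset 0 */
+            return size;
+    }
+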
+Basic Operation and Device DMA Access
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-buf.c
+ :doc: dma buf device access
+
+CPU Access to DMA Buffer Objects
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-buf.c
+ :doc: cpu access
+
+Fence Poll Support
+~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-buf.c
+ :doc: fence polling
+
+Kernel Functions and Structures Reference
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
.. kernel-doc:: drivers/dma-buf/dma-buf.c
:export:
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 0c9abdc0ee31..4d4068855ec4 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -48,11 +48,17 @@ CRTC Abstraction
================
.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
- :export:
+ :doc: overview
+
+CRTC Functions Reference
+--------------------------------
.. kernel-doc:: include/drm/drm_crtc.h
:internal:
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+ :export:
+
Frame Buffer Abstraction
========================
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index cb5daffcd6be..f5760b140f13 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -34,25 +34,26 @@ TTM initialization
------------------
**Warning**
-
This section is outdated.
-Drivers wishing to support TTM must fill out a drm_bo_driver
-structure. The structure contains several fields with function pointers
-for initializing the TTM, allocating and freeing memory, waiting for
-command completion and fence synchronization, and memory migration. See
-the radeon_ttm.c file for an example of usage.
+Drivers wishing to support TTM must pass a filled :c:type:`ttm_bo_driver
+<ttm_bo_driver>` structure to ttm_bo_device_init, together with an
+initialized global reference to the memory manager. The ttm_bo_driver
+structure contains several fields with function pointers for
+initializing the TTM, allocating and freeing memory, waiting for command
+completion and fence synchronization, and memory migration.
-The ttm_global_reference structure is made up of several fields:
+The :c:type:`struct drm_global_reference <drm_global_reference>` is made
+up of several fields:
.. code-block:: c
- struct ttm_global_reference {
+ struct drm_global_reference {
enum ttm_global_types global_type;
size_t size;
void *object;
- int (*init) (struct ttm_global_reference *);
- void (*release) (struct ttm_global_reference *);
+ int (*init) (struct drm_global_reference *);
+ void (*release) (struct drm_global_reference *);
};
@@ -76,6 +77,12 @@ ttm_bo_global_release(), respectively. Also, like the previous
object, ttm_global_item_ref() is used to create an initial reference
count for the TTM, which will call your initialization function.
+See the radeon_ttm.c file for an example of usage.
+
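+A minimal initialization sketch, modelled on the pattern radeon uses (the
+foo_ttm_* init/release helpers are hypothetical driver callbacks):
+
+.. code-block:: c
+
+    struct drm_global_reference mem_global_ref = {
+            .global_type = DRM_GLOBAL_TTM_MEM,
+            .size = sizeof(struct ttm_mem_global),
+            .init = &foo_ttm_mem_global_init,
+            .release = &foo_ttm_mem_global_release,
+    };
+
+    /* Takes the initial reference; calls .init on first use. */
+    ret = drm_global_item_ref(&mem_global_ref);
+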
+.. kernel-doc:: drivers/gpu/drm/drm_global.c
+ :export:
+
+
The Graphics Execution Manager (GEM)
====================================
@@ -284,10 +291,17 @@ To use :c:func:`drm_gem_mmap()`, drivers must fill the struct
:c:type:`struct drm_driver <drm_driver>` gem_vm_ops field
with a pointer to VM operations.
-struct vm_operations_struct \*gem_vm_ops struct
-vm_operations_struct { void (\*open)(struct vm_area_struct \* area);
-void (\*close)(struct vm_area_struct \* area); int (\*fault)(struct
-vm_area_struct \*vma, struct vm_fault \*vmf); };
+The VM operations is a :c:type:`struct vm_operations_struct <vm_operations_struct>`
+made up of several fields, the more interesting ones being:
+
+.. code-block:: c
+
+ struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ int (*fault)(struct vm_fault *vmf);
+ };
+
The open and close operations must update the GEM object reference
count. Drivers can use the :c:func:`drm_gem_vm_open()` and
@@ -303,6 +317,17 @@ created.
Drivers that want to map the GEM object upfront instead of handling page
faults can implement their own mmap file operation handler.
+For platforms without MMU the GEM core provides a helper method
+:c:func:`drm_gem_cma_get_unmapped_area`. The mmap() routines will call
+this to get a proposed address for the mapping.
+
+To use :c:func:`drm_gem_cma_get_unmapped_area`, drivers must fill the
+struct :c:type:`struct file_operations <file_operations>` get_unmapped_area
+field with a pointer to :c:func:`drm_gem_cma_get_unmapped_area`, as in the
+sketch below.
+
+More detailed information about get_unmapped_area can be found in
+Documentation/nommu-mmap.txt.
+
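+A sketch of the wiring (illustrative; the other operations shown are the
+usual DRM file operations and depend on the driver):
+
+.. code-block:: c
+
+    static const struct file_operations foo_fops = {
+            .owner = THIS_MODULE,
+            .open = drm_open,
+            .release = drm_release,
+            .unlocked_ioctl = drm_ioctl,
+            .mmap = drm_gem_cma_mmap,
+    #ifndef CONFIG_MMU
+            .get_unmapped_area = drm_gem_cma_get_unmapped_area,
+    #endif
+    };
+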
Memory Coherency
----------------
@@ -442,7 +467,7 @@ LRU Scan/Eviction Support
-------------------------
.. kernel-doc:: drivers/gpu/drm/drm_mm.c
- :doc: lru scan roaster
+ :doc: lru scan roster
DRM MM Range Allocator Function References
------------------------------------------
@@ -452,3 +477,9 @@ DRM MM Range Allocator Function References
.. kernel-doc:: include/drm/drm_mm.h
:internal:
+
+DRM Cache Handling
+==================
+
+.. kernel-doc:: drivers/gpu/drm/drm_cache.c
+ :export:
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index de3ac9f90f8f..fcc228ef5bc4 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -156,8 +156,12 @@ other hand, a driver requires shared state between clients which is
visible to user-space and accessible beyond open-file boundaries, they
cannot support render nodes.
+
+Testing and validation
+======================
+
Validating changes with IGT
-===========================
+---------------------------
There's a collection of tests that aims to cover the whole functionality of
DRM drivers and that can be used to check that changes to DRM drivers or the
@@ -193,6 +197,12 @@ run-tests.sh is a wrapper around piglit that will execute the tests matching
the -t options. A report in HTML format will be available in
./results/html/index.html. Results can be compared with piglit.
+Display CRC Support
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_debugfs_crc.c
+ :doc: CRC ABI
+
VBlank event handling
=====================
@@ -209,16 +219,3 @@ DRM_IOCTL_MODESET_CTL
mode setting, since on many devices the vertical blank counter is
reset to 0 at some point during modeset. Modern drivers should not
call this any more since with kernel mode setting it is a no-op.
-
-This second part of the GPU Driver Developer's Guide documents driver
-code, implementation details and also all the driver-specific userspace
-interfaces. Especially since all hardware-acceleration interfaces to
-userspace are driver specific for efficiency and other reasons these
-interfaces can be rather substantial. Hence every driver has its own
-chapter.
-
-Testing and validation
-======================
-
-.. kernel-doc:: drivers/gpu/drm/drm_debugfs_crc.c
- :doc: CRC ABI
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 7fb605af090e..b0d6709b8600 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -222,6 +222,18 @@ Video BIOS Table (VBT)
.. kernel-doc:: drivers/gpu/drm/i915/intel_vbt_defs.h
:internal:
+Display PLLs
+------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.c
+ :doc: Display PLLs
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.c
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.h
+ :internal:
+
Memory Management and Command Submission
========================================
@@ -365,4 +377,95 @@ switch_mm
.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
:doc: switch_mm tracepoint
+Perf
+====
+
+Overview
+--------
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :doc: i915 Perf Overview
+
+Comparison with Core Perf
+-------------------------
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :doc: i915 Perf History and Comparison with Core Perf
+
+i915 Driver Entry Points
+------------------------
+
+This section covers the entrypoints exported outside of i915_perf.c to
+integrate with drm/i915 and to handle the `DRM_I915_PERF_OPEN` ioctl.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_init
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_fini
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_register
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_unregister
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_open_ioctl
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_release
+
+i915 Perf Stream
+----------------
+
+This section covers the stream-semantics-agnostic structures and functions
+for representing an i915 perf stream FD and associated file operations.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+ :functions: i915_perf_stream
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+ :functions: i915_perf_stream_ops
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: read_properties_unlocked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_open_ioctl_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_destroy_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_read
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_ioctl
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_enable_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_disable_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_poll
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_poll_locked
+
+i915 Perf Observation Architecture Stream
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+ :functions: i915_oa_ops
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_oa_stream_init
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_oa_read
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_oa_stream_enable
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_oa_stream_disable
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_oa_wait_unlocked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_oa_poll_wait
+
+All i915 Perf Internals
+-----------------------
+
+This section simply includes all currently documented i915 perf internals, in
+no particular order; it may include more minor utilities or platform-specific
+details than are found in the higher-level sections.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :internal:
+
.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/drm/i915/i915_irq.c
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index 367d7c36b8e9..f81278a7c2cc 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -11,6 +11,7 @@ Linux GPU Driver Developer's Guide
drm-kms-helpers
drm-uapi
i915
+ tinydrm
vga-switcheroo
vgaarbiter
diff --git a/Documentation/gpu/introduction.rst b/Documentation/gpu/introduction.rst
index 1903595b5310..eb284eb748ba 100644
--- a/Documentation/gpu/introduction.rst
+++ b/Documentation/gpu/introduction.rst
@@ -23,13 +23,12 @@ For consistency this documentation uses American English. Abbreviations
are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
on. To aid in reading, documentations make full use of the markup
characters kerneldoc provides: @parameter for function parameters,
-@member for structure members, &structure to reference structures and
-function() for functions. These all get automatically hyperlinked if
-kerneldoc for the referenced objects exists. When referencing entries in
-function vtables please use ->vfunc(). Note that kerneldoc does not
-support referencing struct members directly, so please add a reference
-to the vtable struct somewhere in the same paragraph or at least
-section.
+@member for structure members (within the same structure), &struct structure to
+reference structures and function() for functions. These all get automatically
+hyperlinked if kerneldoc for the referenced objects exists. When referencing
+entries in function vtables (and structure members in general) please use
+&vtable_name.vfunc. Unfortunately this does not yet yield a direct link to the
+member, only the structure.
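+
+For example, a kerneldoc comment following these conventions might read (all
+names here are purely illustrative)::
+
+    /**
+     * foo_enable - enable the foo unit
+     * @dev: DRM device
+     *
+     * Enables the foo unit of @dev. Drivers hook this up via
+     * &foo_funcs.enable; see also foo_disable() and &struct drm_device.
+     */
+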
Except in special situations (to separate locked from unlocked variants)
locking requirements for functions aren't documented in the kerneldoc.
@@ -49,3 +48,5 @@ section name should be all upper-case or not, and whether it should end
in a colon or not. Go with the file-local style. Other common section
names are "Notes" with information for dangerous or tricky corner cases,
and "FIXME" where the interface could be cleaned up.
+
+Also read the :ref:`guidelines for the kernel documentation at large <doc_guide>`.
diff --git a/Documentation/gpu/tinydrm.rst b/Documentation/gpu/tinydrm.rst
new file mode 100644
index 000000000000..a913644bfc19
--- /dev/null
+++ b/Documentation/gpu/tinydrm.rst
@@ -0,0 +1,42 @@
+==========================
+drm/tinydrm Driver library
+==========================
+
+.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+ :doc: overview
+
+Core functionality
+==================
+
+.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+ :doc: core
+
+.. kernel-doc:: include/drm/tinydrm/tinydrm.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+ :export:
+
+.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+ :export:
+
+Additional helpers
+==================
+
+.. kernel-doc:: include/drm/tinydrm/tinydrm-helpers.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+ :export:
+
+MIPI DBI Compatible Controllers
+===============================
+
+.. kernel-doc:: drivers/gpu/drm/tinydrm/mipi-dbi.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/tinydrm/mipi-dbi.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/tinydrm/mipi-dbi.c
+ :export:
diff --git a/Documentation/sound/hd-audio/dp-mst.rst b/Documentation/sound/hd-audio/dp-mst.rst
index 58b72437e6c3..1617459e332f 100644
--- a/Documentation/sound/hd-audio/dp-mst.rst
+++ b/Documentation/sound/hd-audio/dp-mst.rst
@@ -19,6 +19,23 @@ PCM
===
To be added
+Pin Initialization
+==================
+Each pin may have several device entries (virtual pins). On Intel platforms,
+the number of device entries changes dynamically. If a DP MST hub is
+connected, the pin is in DP MST mode and has 3 device entries; otherwise it
+has 1 device entry.
+
+To simplify the implementation, all the device entries are initialized at
+boot time, whether or not the pin is in DP MST mode.
+
+Connection list
+===============
+DP MST reuses the connection list code. The code can be reused because
+device entries on the same pin share the same connection list.
+
+This means DP MST can obtain a device entry's connection list without
+first selecting that device entry.
Jack
====
diff --git a/MAINTAINERS b/MAINTAINERS
index 1b3a9cb8d7f1..4b03c4701030 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4031,7 +4031,7 @@ F: drivers/dma-buf/
F: include/linux/dma-buf*
F: include/linux/reservation.h
F: include/linux/*fence.h
-F: Documentation/dma-buf-sharing.txt
+F: Documentation/driver-api/dma-buf.rst
T: git git://anongit.freedesktop.org/drm/drm-misc
SYNC FILE FRAMEWORK
@@ -4041,6 +4041,7 @@ S: Maintained
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
F: drivers/dma-buf/sync_*
+F: drivers/dma-buf/dma-fence*
F: drivers/dma-buf/sw_sync.c
F: include/linux/sync_file.h
F: include/uapi/linux/sync_file.h
@@ -4315,6 +4316,12 @@ S: Supported
F: drivers/gpu/drm/mediatek/
F: Documentation/devicetree/bindings/display/mediatek/
+DRM DRIVER FOR MI0283QT
+M: Noralf Trønnes <noralf@tronnes.org>
+S: Maintained
+F: drivers/gpu/drm/tinydrm/mi0283qt.c
+F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
+
DRM DRIVER FOR MSM ADRENO GPU
M: Rob Clark <robdclark@gmail.com>
L: linux-arm-msm@vger.kernel.org
diff --git a/arch/blackfin/include/asm/vga.h b/arch/blackfin/include/asm/vga.h
new file mode 100644
index 000000000000..89d82fd8fcf1
--- /dev/null
+++ b/arch/blackfin/include/asm/vga.h
@@ -0,0 +1 @@
+#include <asm-generic/vga.h>
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 0f7d28a98b9a..9702c78f458d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1420,8 +1420,10 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
- phys_addr_t *mappable_base, u64 *mappable_end)
+void intel_gtt_get(u64 *gtt_total,
+ u32 *stolen_size,
+ phys_addr_t *mappable_base,
+ u64 *mappable_end)
{
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*stolen_size = intel_private.stolen_size;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e72e64484131..718f832a5c71 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -124,6 +124,28 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
return base + offset;
}
+/**
+ * DOC: fence polling
+ *
+ * To support cross-device and cross-driver synchronization of buffer access,
+ * implicit fences (represented internally in the kernel with &struct dma_fence)
+ * can be attached to a &dma_buf. The glue for that and a few related things are
+ * provided in the &reservation_object structure.
+ *
+ * Userspace can query the state of these implicitly tracked fences using poll()
+ * and related system calls:
+ *
+ * - Checking for POLLIN, i.e. read access, can be used to query the state of the
+ * most recent write or exclusive fence.
+ *
+ * - Checking for POLLOUT, i.e. write access, can be used to query the state of
+ * all attached fences, shared and exclusive ones.
+ *
+ * Note that this only signals the completion of the respective fences, i.e. the
+ * DMA transfers are complete. Cache flushing and any other necessary
+ * preparations before CPU access can begin still need to happen.
+ */
+
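As an illustrative sketch of the polling interface described above (not part of this patch; the helper name and error handling are hypothetical), userspace could wait for the exclusive fence like this:

    #include <poll.h>

    /* Block until the most recent write/exclusive fence on a dma-buf fd
     * has signaled. Cache flushing and other preparations still need to
     * happen separately before CPU access can begin. */
    static int wait_for_exclusive_fence(int dmabuf_fd)
    {
            struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };

            if (poll(&pfd, 1, -1) < 0)  /* POLLIN: exclusive fence done */
                    return -1;
            return 0;
    }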
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
@@ -314,19 +336,52 @@ static inline int is_dma_buf_file(struct file *file)
}
/**
+ * DOC: dma buf device access
+ *
+ * For device DMA access to a shared DMA buffer the usual sequence of operations
+ * is fairly simple:
+ *
+ * 1. The exporter defines its exporter instance using
+ * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
+ * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
+ * as a file descriptor by calling dma_buf_fd().
+ *
+ * 2. Userspace passes this file descriptor to all drivers it wants this buffer
+ * to share with: first the file descriptor is converted to a &dma_buf using
+ * dma_buf_get(). Then the buffer is attached to the device using
+ * dma_buf_attach().
+ *
+ * Up to this stage the exporter is still free to migrate or reallocate the
+ * backing storage.
+ *
+ * 3. Once the buffer is attached to all devices userspace can initiate DMA
+ * access to the shared buffer. In the kernel this is done by calling
+ * dma_buf_map_attachment() and dma_buf_unmap_attachment().
+ *
+ * 4. Once a driver is done with a shared buffer it needs to call
+ * dma_buf_detach() (after cleaning up any mappings) and then release the
+ * reference acquired with dma_buf_get() by calling dma_buf_put().
+ *
+ * For the detailed semantics exporters are expected to implement, see
+ * &dma_buf_ops.
+ */
+
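A condensed sketch of steps 2-4 from the importer's side might look as follows (hypothetical helper; error unwinding shortened):

    #include <linux/dma-buf.h>

    /* Turn a dma-buf fd into a device-mapped scatterlist. */
    static struct sg_table *import_dmabuf(struct device *dev, int fd,
                                          struct dma_buf **buf,
                                          struct dma_buf_attachment **att)
    {
            struct sg_table *sgt;

            *buf = dma_buf_get(fd);            /* fd -> &dma_buf */
            if (IS_ERR(*buf))
                    return ERR_CAST(*buf);

            *att = dma_buf_attach(*buf, dev);  /* may fail with -EBUSY */
            if (IS_ERR(*att)) {
                    dma_buf_put(*buf);
                    return ERR_CAST(*att);
            }

            /* After this call the backing storage is pinned. */
            sgt = dma_buf_map_attachment(*att, DMA_BIDIRECTIONAL);
            if (IS_ERR(sgt)) {
                    dma_buf_detach(*buf, *att);
                    dma_buf_put(*buf);
            }
            return sgt;
    }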
+/**
* dma_buf_export - Creates a new dma_buf, and associates an anon file
* with this buffer, so it can be exported.
* Also connect the allocator specific data and ops to the buffer.
* Additionally, provide a name string for exporter; useful in debugging.
*
* @exp_info: [in] holds all the export related information provided
- * by the exporter. see struct dma_buf_export_info
+ * by the exporter. see &struct dma_buf_export_info
* for further details.
*
* Returns, on success, a newly created dma_buf object, which wraps the
* supplied private data and operations for dma_buf_ops. On either missing
* ops, or error in allocating struct dma_buf, will return negative error.
*
+ * For most cases the easiest way to create @exp_info is through the
+ * DEFINE_DMA_BUF_EXPORT_INFO() macro.
*/
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
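On the exporting side, a minimal sketch of this (the driver object and ops names are hypothetical) could be:

    static struct dma_buf *my_obj_export(struct my_obj *obj)
    {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

            exp_info.ops = &my_obj_dmabuf_ops;  /* the driver's &dma_buf_ops */
            exp_info.size = obj->size;
            exp_info.flags = O_RDWR;
            exp_info.priv = obj;

            return dma_buf_export(&exp_info);
    }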
@@ -458,7 +513,11 @@ EXPORT_SYMBOL_GPL(dma_buf_get);
* dma_buf_put - decreases refcount of the buffer
* @dmabuf: [in] buffer to reduce refcount of
*
- * Uses file's refcounting done implicitly by fput()
+ * Uses file's refcounting done implicitly by fput().
+ *
+ * If, as a result of this call, the refcount becomes 0, the 'release' file
+ * operation related to this fd is called. It in turn calls the
+ * &dma_buf_ops.release vfunc, and frees the memory allocated for the dmabuf
+ * when it was exported.
*/
void dma_buf_put(struct dma_buf *dmabuf)
{
@@ -475,8 +534,17 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
*
- * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
- * error.
+ * Returns a &struct dma_buf_attachment pointer for this attachment. Attachments
+ * must be cleaned up by calling dma_buf_detach().
+ *
+ * Returns:
+ *
+ * A pointer to newly created &dma_buf_attachment on success, or a negative
+ * error code wrapped into a pointer on failure.
+ *
+ * Note that this can fail if the backing storage of @dmabuf is in a place not
+ * accessible to @dev, and cannot be moved to a more suitable place. This is
+ * indicated with the error code -EBUSY.
*/
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
@@ -519,6 +587,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
* @dmabuf: [in] buffer to detach from.
* @attach: [in] attachment to be detached; is free'd after this call.
*
+ * Clean up a device attachment obtained by calling dma_buf_attach().
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
@@ -543,7 +612,12 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
* @direction: [in] direction of DMA transfer
*
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
- * on error.
+ * on error. May return -EINTR if it is interrupted by a signal.
+ *
+ * A mapping must be unmapped again using dma_buf_unmap_attachment(). Note that
+ * the underlying backing storage is pinned for as long as a mapping exists,
+ * therefore users/importers should not hold onto a mapping for undue amounts of
+ * time.
*/
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
@@ -571,6 +645,7 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
* @sg_table: [in] scatterlist info of the buffer to unmap
* @direction: [in] direction of DMA transfer
*
+ * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
*/
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
@@ -586,6 +661,122 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+/**
+ * DOC: cpu access
+ *
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
+ *
+ * - Fallback operations in the kernel, for example when a device is connected
+ * over USB and the kernel needs to shuffle the data around first before
+ * sending it away. Cache coherency is handled by bracketing any transactions
+ * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
+ *
+ * To support dma_buf objects residing in highmem, cpu access is page-based
+ * using an API similar to kmap. Accessing a dma_buf is done in aligned chunks
+ * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
+ * returns a pointer in kernel virtual address space. Afterwards the chunk
+ * needs to be unmapped again. There is no limit on how often a given chunk
+ * can be mapped and unmapped, i.e. the importer does not need to call
+ * begin_cpu_access again before mapping the same chunk again.
+ *
+ * Interfaces::
+ * void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
+ * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
+ *
+ * There are also atomic variants of these interfaces. Like for kmap they
+ * facilitate non-blocking fast-paths. Neither the importer nor the exporter
+ * (in the callback) is allowed to block when using these.
+ *
+ * Interfaces::
+ * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
+ * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
+ *
+ * For importers all the restrictions of using kmap apply, like the limited
+ * supply of kmap_atomic slots. Hence an importer shall only hold onto at
+ * max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ *
+ * dma_buf kmap calls outside of the range specified in begin_cpu_access are
+ * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
+ * the partial chunks at the beginning and end but may return stale or bogus
+ * data outside of the range (in these partial chunks).
+ *
+ * Note that these calls need to always succeed. The exporter needs to
+ * complete any preparations that might fail in begin_cpu_access.
+ *
+ * For cases where the overhead of kmap is too high, a vmap interface
+ * is provided. This interface should be used very carefully, as vmalloc
+ * space is a limited resource on many architectures.
+ *
+ * Interfaces::
+ * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
+ * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
+ *
+ * The vmap call can fail if there is no vmap support in the exporter, or if
+ * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
+ * that the dma-buf layer keeps a reference count for all vmap access and
+ * calls down into the exporter's vmap function only when no vmapping exists,
+ * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
+ * provided by taking the dma_buf->lock mutex.
+ *
+ * - For full compatibility on the importer side with existing userspace
+ * interfaces, which might already support mmap'ing buffers. This is needed in
+ * many processing pipelines (e.g. feeding a software rendered image into a
+ * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
+ * framework already supported this, and mmap support was needed for DMA
+ * buffer file descriptors to replace ION buffers.
+ *
+ * There are no special interfaces; userspace simply calls mmap on the dma-buf
+ * fd. But like for CPU access there's a need to bracket the actual access,
+ * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
+ * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
+ * be restarted.
+ *
+ * Some systems might need some sort of cache coherency management, e.g. when
+ * CPU and GPU domains are being accessed through dma-buf at the same time.
+ * To circumvent this problem there are begin/end coherency markers that
+ * forward directly to the existing dma-buf device drivers' vfunc hooks.
+ * Userspace can make use of those markers through the DMA_BUF_IOCTL_SYNC
+ * ioctl. The sequence would be used as follows:
+ *
+ * - mmap dma-buf fd
+ * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
+ * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
+ * want (with the new data being consumed by say the GPU or the scanout
+ * device)
+ * - munmap once you don't need the buffer any more
+ *
+ * For correctness and optimal performance, it is always required to use
+ * SYNC_START and SYNC_END before and after, respectively, when accessing the
+ * mapped address. Userspace cannot rely on coherent access, even when there
+ * are systems where it just works without calling these ioctls.
+ *
+ * - And as a CPU fallback in userspace processing pipelines.
+ *
+ * Similar to the motivation for kernel cpu access it is again important that
+ * the userspace code of a given importing subsystem can use the same
+ * interfaces with an imported dma-buf buffer object as with a native buffer
+ * object. This is especially important for drm where the userspace part of
+ * contemporary OpenGL, X, and other drivers is huge, and reworking them to
+ * use a different way to mmap a buffer would be rather invasive.
+ *
+ * The assumption in the current dma-buf interfaces is that redirecting the
+ * initial mmap is all that's needed. A survey of some of the existing
+ * subsystems shows that no driver seems to do any nefarious thing like
+ * syncing up with outstanding asynchronous processing on the device or
+ * allocating special resources at fault time. So hopefully this is good
+ * enough, since adding interfaces to intercept pagefaults and allow pte
+ * shootdowns would increase the complexity quite a bit.
+ *
+ * Interface::
+ * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
+ * unsigned long);
+ *
+ * If the importing subsystem simply provides a special-purpose mmap call to
+ * set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ * equally achieve that for a dma-buf object.
+ */
+
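A sketch of one such bracketed userspace write cycle (the helper name is hypothetical, and dmabuf_fd and len are assumed to come from the exporting driver):

    #include <linux/dma-buf.h>   /* DMA_BUF_IOCTL_SYNC, struct dma_buf_sync */
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <errno.h>
    #include <string.h>

    static int dmabuf_cpu_fill(int dmabuf_fd, size_t len, unsigned char val)
    {
            struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START |
                                                  DMA_BUF_SYNC_WRITE };
            int ret;
            void *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                             dmabuf_fd, 0);

            if (map == MAP_FAILED)
                    return -1;

            /* SYNC_START/SYNC_END may fail with EAGAIN or EINTR: restart. */
            do {
                    ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
            } while (ret == -1 && (errno == EAGAIN || errno == EINTR));

            memset(map, val, len);

            sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
            do {
                    ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
            } while (ret == -1 && (errno == EAGAIN || errno == EINTR));

            munmap(map, len);
            return 0;
    }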
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
@@ -611,6 +802,10 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* @dmabuf: [in] buffer to prepare cpu access for.
* @direction: [in] length of range for cpu access.
*
+ * After the cpu access is complete the caller should call
+ * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
+ * it guaranteed to be coherent with other DMA access.
+ *
* Can return negative error values, returns 0 on success.
*/
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -643,6 +838,8 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
* @dmabuf: [in] buffer to complete cpu access for.
* @direction: [in] length of range for cpu access.
*
+ * This terminates CPU access started with dma_buf_begin_cpu_access().
+ *
* Can return negative error values, returns 0 on success.
*/
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 0212af7997d9..d1f1f456f5c4 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -28,6 +28,7 @@
EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
/*
* fence context counter: each execution context should have its own
@@ -282,6 +283,31 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
EXPORT_SYMBOL(dma_fence_add_callback);
/**
+ * dma_fence_get_status - returns the status upon completion
+ * @fence: [in] the dma_fence to query
+ *
+ * This wraps dma_fence_get_status_locked() to return the error status
+ * condition on a signaled fence. See dma_fence_get_status_locked() for more
+ * details.
+ *
+ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
+ * been signaled without an error condition, or a negative error code
+ * if the fence has completed with an error.
+ */
+int dma_fence_get_status(struct dma_fence *fence)
+{
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(fence->lock, flags);
+ status = dma_fence_get_status_locked(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return status;
+}
+EXPORT_SYMBOL(dma_fence_get_status);
+
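A brief usage sketch covering the three possible outcomes (the caller is hypothetical):

    static void report_fence_status(struct dma_fence *fence)
    {
            int status = dma_fence_get_status(fence);

            if (status == 0)
                    pr_info("fence not yet signaled\n");
            else if (status == 1)
                    pr_info("fence signaled without error\n");
            else
                    pr_info("fence completed with error %d\n", status);
    }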
+/**
* dma_fence_remove_callback - remove a callback from the signaling list
* @fence: [in] the fence to wait on
* @cb: [in] the callback to remove
@@ -541,6 +567,7 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
fence->context = context;
fence->seqno = seqno;
fence->flags = 0UL;
+ fence->error = 0;
trace_dma_fence_init(fence);
}
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 48b20e34fb6d..c769dc653b34 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -62,30 +62,29 @@ void sync_file_debug_remove(struct sync_file *sync_file)
static const char *sync_status_str(int status)
{
- if (status == 0)
- return "signaled";
+ if (status < 0)
+ return "error";
if (status > 0)
- return "active";
+ return "signaled";
- return "error";
+ return "active";
}
static void sync_print_fence(struct seq_file *s,
struct dma_fence *fence, bool show)
{
- int status = 1;
struct sync_timeline *parent = dma_fence_parent(fence);
+ int status;
- if (dma_fence_is_signaled_locked(fence))
- status = fence->status;
+ status = dma_fence_get_status_locked(fence);
seq_printf(s, " %s%sfence %s",
show ? parent->name : "",
show ? "_" : "",
sync_status_str(status));
- if (status <= 0) {
+ if (status) {
struct timespec64 ts64 =
ktime_to_timespec64(fence->timestamp);
@@ -136,7 +135,7 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
- sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
+ sync_status_str(dma_fence_get_status(sync_file->fence)));
if (dma_fence_is_array(sync_file->fence)) {
struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 6d802f2d2881..2321035f6204 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -67,9 +67,10 @@ static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
* sync_file_create() - creates a sync file
* @fence: fence to add to the sync_fence
*
- * Creates a sync_file containg @fence. Once this is called, the sync_file
- * takes ownership of @fence. The sync_file can be released with
- * fput(sync_file->file). Returns the sync_file or NULL in case of error.
+ * Creates a sync_file containing @fence. This function acquires an additional
+ * reference to @fence for the newly-created &sync_file, if it succeeds. The
+ * sync_file can be released with fput(sync_file->file). Returns the
+ * sync_file or NULL in case of error.
*/
struct sync_file *sync_file_create(struct dma_fence *fence)
{
@@ -90,13 +91,6 @@ struct sync_file *sync_file_create(struct dma_fence *fence)
}
EXPORT_SYMBOL(sync_file_create);
-/**
- * sync_file_fdget() - get a sync_file from an fd
- * @fd: fd referencing a fence
- *
- * Ensures @fd references a valid sync_file, increments the refcount of the
- * backing file. Returns the sync_file or NULL in case of error.
- */
static struct sync_file *sync_file_fdget(int fd)
{
struct file *file = fget(fd);
@@ -379,10 +373,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
- if (dma_fence_is_signaled(fence))
- info->status = fence->status >= 0 ? 1 : fence->status;
- else
- info->status = 0;
+
+ info->status = dma_fence_get_status(fence);
info->timestamp_ns = ktime_to_ns(fence->timestamp);
}
@@ -468,4 +460,3 @@ static const struct file_operations sync_file_fops = {
.unlocked_ioctl = sync_file_ioctl,
.compat_ioctl = sync_file_ioctl,
};
-
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index ebfe8404c25f..88e01e08e279 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
+ depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select HDMI
select FB_CMDLINE
select I2C
@@ -48,6 +48,21 @@ config DRM_DEBUG_MM
If in doubt, say "N".
+config DRM_DEBUG_MM_SELFTEST
+ tristate "kselftests for DRM range manager (struct drm_mm)"
+ depends on DRM
+ depends on DEBUG_KERNEL
+ select PRIME_NUMBERS
+ select DRM_LIB_RANDOM
+ default n
+ help
+ This option provides a kernel module that can be used to test
+ the DRM range manager (drm_mm) and its API. This option is not
+ useful for distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ If in doubt, say "N".
+
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -98,7 +113,7 @@ config DRM_LOAD_EDID_FIRMWARE
config DRM_TTM
tristate
- depends on DRM
+ depends on DRM && MMU
help
GPU memory management subsystem for devices with multiple
GPU memory types. Will be enabled automatically if a device driver
@@ -121,13 +136,17 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+config DRM_VM
+ bool
+ depends on DRM && MMU
+
source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
config DRM_RADEON
tristate "ATI Radeon"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
@@ -147,7 +166,7 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_AMDGPU
tristate "AMD GPU"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
@@ -244,11 +263,14 @@ source "drivers/gpu/drm/mxsfb/Kconfig"
source "drivers/gpu/drm/meson/Kconfig"
+source "drivers/gpu/drm/tinydrm/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
- depends on DRM
+ depends on DRM && MMU
+ select DRM_VM
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
@@ -321,3 +343,7 @@ config DRM_SAVAGE
chipset. If M is selected the module will be called savage.
endif # DRM_LEGACY
+
+config DRM_LIB_RANDOM
+ bool
+ default n
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index b9ae4280de9d..3ee95793d122 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
+ drm_lock.o drm_memory.o drm_drv.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
@@ -18,6 +18,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o
+drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
+drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
@@ -37,6 +39,7 @@ drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
CFLAGS_drm_trace_points.o := -I$(src)
@@ -91,3 +94,4 @@ obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
+obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 41bd2bf28f4c..2814aad81752 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
- amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -34,7 +34,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
amdgpu-y += \
- vi.o
+ vi.o mxgpu_vi.o
# add GMC block
amdgpu-y += \
@@ -52,8 +52,7 @@ amdgpu-y += \
# add SMC block
amdgpu-y += \
amdgpu_dpm.o \
- amdgpu_powerplay.o \
- cz_smc.o cz_dpm.o
+ amdgpu_powerplay.o
# add DCE block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 63208e5c1588..c1b913541739 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -91,7 +91,6 @@ extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
-extern int amdgpu_powerplay;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
extern unsigned amdgpu_pcie_gen_cap;
@@ -184,12 +183,18 @@ enum amdgpu_thermal_irq {
AMDGPU_THERMAL_IRQ_LAST
};
+enum amdgpu_kiq_irq {
+ AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
+ AMDGPU_CP_KIQ_IRQ_LAST
+};
+
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
+void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
bool amdgpu_is_idle(struct amdgpu_device *adev,
@@ -352,7 +357,7 @@ struct amdgpu_bo_va_mapping {
struct list_head list;
struct interval_tree_node it;
uint64_t offset;
- uint32_t flags;
+ uint64_t flags;
};
/* bo virtual addresses in a specific vm */
@@ -776,14 +781,20 @@ struct amdgpu_mec {
u32 num_queue;
};
+struct amdgpu_kiq {
+ u64 eop_gpu_addr;
+ struct amdgpu_bo *eop_obj;
+ struct amdgpu_ring ring;
+ struct amdgpu_irq_src irq;
+};
+
/*
* GPU scratch registers structures, functions & helpers
*/
struct amdgpu_scratch {
unsigned num_reg;
uint32_t reg_base;
- bool free[32];
- uint32_t reg[32];
+ uint32_t free_mask;
};
/*
@@ -851,6 +862,7 @@ struct amdgpu_gfx {
struct amdgpu_gca_config config;
struct amdgpu_rlc rlc;
struct amdgpu_mec mec;
+ struct amdgpu_kiq kiq;
struct amdgpu_scratch scratch;
const struct firmware *me_fw; /* ME firmware */
uint32_t me_fw_version;
@@ -894,8 +906,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
- struct amdgpu_job *job, struct dma_fence **f);
+ struct amdgpu_ib *ibs, struct amdgpu_job *job,
+ struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -938,6 +950,7 @@ struct amdgpu_cs_parser {
#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
+#define AMDGPU_VM_DOMAIN (1 << 3) /* bit set means in virtual memory context */
struct amdgpu_job {
struct amd_sched_job base;
@@ -1133,7 +1146,6 @@ int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
-void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif
int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
@@ -1178,7 +1190,6 @@ struct amdgpu_asic_funcs {
bool (*read_disabled_bios)(struct amdgpu_device *adev);
bool (*read_bios_from_rom)(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes);
- void (*detect_hw_virtualization) (struct amdgpu_device *adev);
int (*read_register)(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1333,7 +1344,6 @@ struct amdgpu_device {
/* BIOS */
uint8_t *bios;
uint32_t bios_size;
- bool is_atom_bios;
struct amdgpu_bo *stollen_vga_memory;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -1463,7 +1473,7 @@ struct amdgpu_device {
/* amdkfd interface */
struct kfd_dev *kfd;
- struct amdgpu_virtualization virtualization;
+ struct amdgpu_virt virt;
/* link all shadow bo */
struct list_head shadow_list;
@@ -1472,6 +1482,9 @@ struct amdgpu_device {
spinlock_t gtt_list_lock;
struct list_head gtt_list;
+ /* record whether a hw reset has been performed */
+ bool has_hw_reset;
+
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1576,6 +1589,37 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
ring->count_dw--;
}
+static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw)
+{
+ unsigned occupied, chunk1, chunk2;
+ void *dst;
+
+ if (ring->count_dw < count_dw) {
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+ } else {
+ occupied = ring->wptr & ring->ptr_mask;
+ dst = (void *)&ring->ring[occupied];
+ chunk1 = ring->ptr_mask + 1 - occupied;
+ chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
+ chunk2 = count_dw - chunk1;
+ chunk1 <<= 2;
+ chunk2 <<= 2;
+
+ if (chunk1)
+ memcpy(dst, src, chunk1);
+
+ if (chunk2) {
+ src += chunk1;
+ dst = (void *)ring->ring;
+ memcpy(dst, src, chunk2);
+ }
+
+ ring->wptr += count_dw;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw -= count_dw;
+ }
+}
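As a worked example of the wrap-around logic above (numbers illustrative only): with ptr_mask = 255 (a 256-dword ring), wptr = 250 and count_dw = 10, occupied is 250, chunk1 is 6 dwords (24 bytes after the shift) copied to the end of the ring, chunk2 is the remaining 4 dwords copied to the start, and the new wptr is (250 + 10) & 255 = 4.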
+
static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
@@ -1605,7 +1649,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
-#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
@@ -1627,6 +1670,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
+#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -1658,13 +1703,14 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
int amdgpu_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_need_backup(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
-bool amdgpu_card_posted(struct amdgpu_device *adev);
+bool amdgpu_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
u32 ip_instance, u32 ring,
struct amdgpu_ring **out_ring);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
@@ -1711,7 +1757,7 @@ extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
-int amdgpu_driver_unload_kms(struct drm_device *dev);
+void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 5796539a0bcb..ef79551b4cb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -672,12 +672,10 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
enc->enc_priv) {
- if (adev->is_atom_bios) {
- struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
- if (dig->bl_dev) {
- atif->encoder_for_bl = enc;
- break;
- }
+ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+ if (dig->bl_dev) {
+ atif->encoder_for_bl = enc;
+ break;
}
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 8ec1967a850b..821f7cc2051f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -42,6 +42,51 @@
#define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA)
#define AMD_VBIOS_LENGTH(p) ((p)[2] << 9)
+/* Check if the current BIOS is an ATOM BIOS.
+ * Return true if it is an ATOM BIOS, false otherwise.
+ */
+static bool check_atom_bios(uint8_t *bios, size_t size)
+{
+ uint16_t tmp, bios_header_start;
+
+ if (!bios || size < 0x49) {
+ DRM_INFO("vbios mem is null or mem size is wrong\n");
+ return false;
+ }
+
+ if (!AMD_IS_VALID_VBIOS(bios)) {
+ DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]);
+ return false;
+ }
+
+ tmp = bios[0x18] | (bios[0x19] << 8);
+ if (bios[tmp + 0x14] != 0x0) {
+ DRM_INFO("Not an x86 BIOS ROM\n");
+ return false;
+ }
+
+ bios_header_start = bios[0x48] | (bios[0x49] << 8);
+ if (!bios_header_start) {
+ DRM_INFO("Can't locate bios header\n");
+ return false;
+ }
+
+ tmp = bios_header_start + 4;
+ if (size < tmp) {
+ DRM_INFO("BIOS header is broken\n");
+ return false;
+ }
+
+ if (!memcmp(bios + tmp, "ATOM", 4) ||
+ !memcmp(bios + tmp, "MOTA", 4)) {
+ DRM_DEBUG("ATOMBIOS detected\n");
+ return true;
+ }
+
+ return false;
+}
+
+
/* If you boot an IGP board with a discrete card as the primary,
* the IGP rom is not accessible via the rom bar as the IGP rom is
* part of the system bios. On boot, the system bios puts a
@@ -55,7 +100,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
resource_size_t size = 256 * 1024; /* ??? */
if (!(adev->flags & AMD_IS_APU))
- if (!amdgpu_card_posted(adev))
+ if (amdgpu_need_post(adev))
return false;
adev->bios = NULL;
@@ -65,10 +110,6 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
return false;
}
- if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
- iounmap(bios);
- return false;
- }
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
iounmap(bios);
@@ -77,12 +118,18 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
+
+ if (!check_atom_bios(adev->bios, size)) {
+ kfree(adev->bios);
+ return false;
+ }
+
return true;
}
bool amdgpu_read_bios(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios, val[2];
+ uint8_t __iomem *bios;
size_t size;
adev->bios = NULL;
@@ -92,13 +139,6 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
return false;
}
- val[0] = readb(&bios[0]);
- val[1] = readb(&bios[1]);
-
- if (size == 0 || !AMD_IS_VALID_VBIOS(val)) {
- pci_unmap_rom(adev->pdev, bios);
- return false;
- }
adev->bios = kzalloc(size, GFP_KERNEL);
if (adev->bios == NULL) {
pci_unmap_rom(adev->pdev, bios);
@@ -107,6 +147,12 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
+
+ if (!check_atom_bios(adev->bios, size)) {
+ kfree(adev->bios);
+ return false;
+ }
+
return true;
}
@@ -140,7 +186,14 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
adev->bios_size = len;
/* read complete BIOS */
- return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
+ amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
+
+ if (!check_atom_bios(adev->bios, len)) {
+ kfree(adev->bios);
+ return false;
+ }
+
+ return true;
}
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
@@ -155,13 +208,17 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
return false;
}
- if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
+ adev->bios = kzalloc(size, GFP_KERNEL);
+ if (adev->bios == NULL)
return false;
- }
- adev->bios = kmemdup(bios, size, GFP_KERNEL);
- if (adev->bios == NULL) {
+
+ memcpy_fromio(adev->bios, bios, size);
+
+ if (!check_atom_bios(adev->bios, size)) {
+ kfree(adev->bios);
return false;
}
+
adev->bios_size = size;
return true;
@@ -273,7 +330,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
break;
}
- if (i == 0 || !AMD_IS_VALID_VBIOS(adev->bios)) {
+ if (!check_atom_bios(adev->bios, size)) {
kfree(adev->bios);
return false;
}
@@ -298,53 +355,59 @@ static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
#ifdef CONFIG_ACPI
static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
{
- bool ret = false;
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
- GOP_VBIOS_CONTENT *vbios;
- VFCT_IMAGE_HEADER *vhdr;
+ unsigned offset;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
- goto out_unmap;
+ return false;
}
vfct = (UEFI_ACPI_VFCT *)hdr;
- if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
- DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
- goto out_unmap;
- }
+ offset = vfct->VBIOSImageOffset;
- vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
- vhdr = &vbios->VbiosHeader;
- DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
- vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
- vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
-
- if (vhdr->PCIBus != adev->pdev->bus->number ||
- vhdr->PCIDevice != PCI_SLOT(adev->pdev->devfn) ||
- vhdr->PCIFunction != PCI_FUNC(adev->pdev->devfn) ||
- vhdr->VendorID != adev->pdev->vendor ||
- vhdr->DeviceID != adev->pdev->device) {
- DRM_INFO("ACPI VFCT table is not for this card\n");
- goto out_unmap;
- }
+ while (offset < tbl_size) {
+ GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset);
+ VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader;
- if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
- DRM_ERROR("ACPI VFCT image truncated\n");
- goto out_unmap;
- }
+ offset += sizeof(VFCT_IMAGE_HEADER);
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image header truncated\n");
+ return false;
+ }
+
+ offset += vhdr->ImageLength;
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image truncated\n");
+ return false;
+ }
- adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
- adev->bios_size = vhdr->ImageLength;
- ret = !!adev->bios;
+ if (vhdr->ImageLength &&
+ vhdr->PCIBus == adev->pdev->bus->number &&
+ vhdr->PCIDevice == PCI_SLOT(adev->pdev->devfn) &&
+ vhdr->PCIFunction == PCI_FUNC(adev->pdev->devfn) &&
+ vhdr->VendorID == adev->pdev->vendor &&
+ vhdr->DeviceID == adev->pdev->device) {
+ adev->bios = kmemdup(&vbios->VbiosContent,
+ vhdr->ImageLength,
+ GFP_KERNEL);
+
+ if (!check_atom_bios(adev->bios, vhdr->ImageLength)) {
+ kfree(adev->bios);
+ return false;
+ }
+ adev->bios_size = vhdr->ImageLength;
+ return true;
+ }
+ }
-out_unmap:
- return ret;
+ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+ return false;
}
#else
static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
@@ -355,57 +418,27 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
- bool r;
- uint16_t tmp, bios_header_start;
+ if (amdgpu_atrm_get_bios(adev))
+ return true;
- r = amdgpu_atrm_get_bios(adev);
- if (!r)
- r = amdgpu_acpi_vfct_bios(adev);
- if (!r)
- r = igp_read_bios_from_vram(adev);
- if (!r)
- r = amdgpu_read_bios(adev);
- if (!r) {
- r = amdgpu_read_bios_from_rom(adev);
- }
- if (!r) {
- r = amdgpu_read_disabled_bios(adev);
- }
- if (!r) {
- r = amdgpu_read_platform_bios(adev);
- }
- if (!r || adev->bios == NULL) {
- DRM_ERROR("Unable to locate a BIOS ROM\n");
- adev->bios = NULL;
- return false;
- }
- if (!AMD_IS_VALID_VBIOS(adev->bios)) {
- printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
- goto free_bios;
- }
+ if (amdgpu_acpi_vfct_bios(adev))
+ return true;
- tmp = RBIOS16(0x18);
- if (RBIOS8(tmp + 0x14) != 0x0) {
- DRM_INFO("Not an x86 BIOS ROM, not using.\n");
- goto free_bios;
- }
+ if (igp_read_bios_from_vram(adev))
+ return true;
- bios_header_start = RBIOS16(0x48);
- if (!bios_header_start) {
- goto free_bios;
- }
- tmp = bios_header_start + 4;
- if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
- !memcmp(adev->bios + tmp, "MOTA", 4)) {
- adev->is_atom_bios = true;
- } else {
- adev->is_atom_bios = false;
- }
+ if (amdgpu_read_bios(adev))
+ return true;
- DRM_DEBUG("%sBIOS detected\n", adev->is_atom_bios ? "ATOM" : "COM");
- return true;
-free_bios:
- kfree(adev->bios);
- adev->bios = NULL;
+ if (amdgpu_read_bios_from_rom(adev))
+ return true;
+
+ if (amdgpu_read_disabled_bios(adev))
+ return true;
+
+ if (amdgpu_read_platform_bios(adev))
+ return true;
+
+ DRM_ERROR("Unable to locate a BIOS ROM\n");
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 4c851fde1e82..d9e5aa4a79ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -713,6 +713,7 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
CGS_FUNC_ADEV;
if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
return 0;
}
/* cannot release other firmware because they are not created by cgs */
@@ -762,6 +763,23 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
return fw_version;
}
+static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
+ bool en)
+{
+ CGS_FUNC_ADEV;
+
+ if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
+ adev->gfx.rlc.funcs->exit_safe_mode == NULL)
+ return 0;
+
+ if (en)
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ else
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+ return 0;
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -808,37 +826,65 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
const uint8_t *src;
const struct smc_firmware_header_v1_0 *hdr;
+ if (CGS_UCODE_ID_SMU_SK == type)
+ amdgpu_cgs_rel_firmware(cgs_device, CGS_UCODE_ID_SMU);
+
if (!adev->pm.fw) {
switch (adev->asic_type) {
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
- ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+ info->is_kicker = true;
strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
- else
+ } else
strcpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
- ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+ ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
+ info->is_kicker = true;
strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
- else
+ } else
strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
- if (type == CGS_UCODE_ID_SMU)
- strcpy(fw_name, "amdgpu/polaris11_smc.bin");
- else if (type == CGS_UCODE_ID_SMU_SK)
+ if (type == CGS_UCODE_ID_SMU) {
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe2) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+ } else
+ strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ } else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ }
break;
case CHIP_POLARIS10:
- if (type == CGS_UCODE_ID_SMU)
- strcpy(fw_name, "amdgpu/polaris10_smc.bin");
- else if (type == CGS_UCODE_ID_SMU_SK)
+ if (type == CGS_UCODE_ID_SMU) {
+ if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe3) ||
+ (adev->pdev->revision == 0xe4) ||
+ (adev->pdev->revision == 0xe5) ||
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ } else
+ strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ } else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ }
break;
case CHIP_POLARIS12:
strcpy(fw_name, "amdgpu/polaris12_smc.bin");
@@ -1200,51 +1246,52 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
}
static const struct cgs_ops amdgpu_cgs_ops = {
- amdgpu_cgs_gpu_mem_info,
- amdgpu_cgs_gmap_kmem,
- amdgpu_cgs_gunmap_kmem,
- amdgpu_cgs_alloc_gpu_mem,
- amdgpu_cgs_free_gpu_mem,
- amdgpu_cgs_gmap_gpu_mem,
- amdgpu_cgs_gunmap_gpu_mem,
- amdgpu_cgs_kmap_gpu_mem,
- amdgpu_cgs_kunmap_gpu_mem,
- amdgpu_cgs_read_register,
- amdgpu_cgs_write_register,
- amdgpu_cgs_read_ind_register,
- amdgpu_cgs_write_ind_register,
- amdgpu_cgs_read_pci_config_byte,
- amdgpu_cgs_read_pci_config_word,
- amdgpu_cgs_read_pci_config_dword,
- amdgpu_cgs_write_pci_config_byte,
- amdgpu_cgs_write_pci_config_word,
- amdgpu_cgs_write_pci_config_dword,
- amdgpu_cgs_get_pci_resource,
- amdgpu_cgs_atom_get_data_table,
- amdgpu_cgs_atom_get_cmd_table_revs,
- amdgpu_cgs_atom_exec_cmd_table,
- amdgpu_cgs_create_pm_request,
- amdgpu_cgs_destroy_pm_request,
- amdgpu_cgs_set_pm_request,
- amdgpu_cgs_pm_request_clock,
- amdgpu_cgs_pm_request_engine,
- amdgpu_cgs_pm_query_clock_limits,
- amdgpu_cgs_set_camera_voltages,
- amdgpu_cgs_get_firmware_info,
- amdgpu_cgs_rel_firmware,
- amdgpu_cgs_set_powergating_state,
- amdgpu_cgs_set_clockgating_state,
- amdgpu_cgs_get_active_displays_info,
- amdgpu_cgs_notify_dpm_enabled,
- amdgpu_cgs_call_acpi_method,
- amdgpu_cgs_query_system_info,
- amdgpu_cgs_is_virtualization_enabled
+ .gpu_mem_info = amdgpu_cgs_gpu_mem_info,
+ .gmap_kmem = amdgpu_cgs_gmap_kmem,
+ .gunmap_kmem = amdgpu_cgs_gunmap_kmem,
+ .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
+ .free_gpu_mem = amdgpu_cgs_free_gpu_mem,
+ .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
+ .gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
+ .kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
+ .kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
+ .read_register = amdgpu_cgs_read_register,
+ .write_register = amdgpu_cgs_write_register,
+ .read_ind_register = amdgpu_cgs_read_ind_register,
+ .write_ind_register = amdgpu_cgs_write_ind_register,
+ .read_pci_config_byte = amdgpu_cgs_read_pci_config_byte,
+ .read_pci_config_word = amdgpu_cgs_read_pci_config_word,
+ .read_pci_config_dword = amdgpu_cgs_read_pci_config_dword,
+ .write_pci_config_byte = amdgpu_cgs_write_pci_config_byte,
+ .write_pci_config_word = amdgpu_cgs_write_pci_config_word,
+ .write_pci_config_dword = amdgpu_cgs_write_pci_config_dword,
+ .get_pci_resource = amdgpu_cgs_get_pci_resource,
+ .atom_get_data_table = amdgpu_cgs_atom_get_data_table,
+ .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
+ .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
+ .create_pm_request = amdgpu_cgs_create_pm_request,
+ .destroy_pm_request = amdgpu_cgs_destroy_pm_request,
+ .set_pm_request = amdgpu_cgs_set_pm_request,
+ .pm_request_clock = amdgpu_cgs_pm_request_clock,
+ .pm_request_engine = amdgpu_cgs_pm_request_engine,
+ .pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits,
+ .set_camera_voltages = amdgpu_cgs_set_camera_voltages,
+ .get_firmware_info = amdgpu_cgs_get_firmware_info,
+ .rel_firmware = amdgpu_cgs_rel_firmware,
+ .set_powergating_state = amdgpu_cgs_set_powergating_state,
+ .set_clockgating_state = amdgpu_cgs_set_clockgating_state,
+ .get_active_displays_info = amdgpu_cgs_get_active_displays_info,
+ .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
+ .call_acpi_method = amdgpu_cgs_call_acpi_method,
+ .query_system_info = amdgpu_cgs_query_system_info,
+ .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
+ .enter_safe_mode = amdgpu_cgs_enter_safe_mode,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
- amdgpu_cgs_add_irq_source,
- amdgpu_cgs_irq_get,
- amdgpu_cgs_irq_put
+ .add_irq_source = amdgpu_cgs_add_irq_source,
+ .irq_get = amdgpu_cgs_irq_get,
+ .irq_put = amdgpu_cgs_irq_put
};
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 41e41f90265d..d2d0f60ff36d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -75,10 +75,10 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
*out_ring = &adev->uvd.ring;
break;
case AMDGPU_HW_IP_VCE:
- if (ring < 2){
+ if (ring < adev->vce.num_rings){
*out_ring = &adev->vce.ring[ring];
} else {
- DRM_ERROR("only two VCE rings are supported\n");
+ DRM_ERROR("only %d VCE rings are supported\n", adev->vce.num_rings);
return -EINVAL;
}
break;
@@ -351,8 +351,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
* submission. This can result in a debt that can stop buffer migrations
* temporarily.
*/
-static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
- u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
{
spin_lock(&adev->mm_stats.lock);
adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
@@ -778,6 +777,20 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (r)
return r;
+ if (amdgpu_sriov_vf(adev)) {
+ struct dma_fence *f;
+ bo_va = vm->csa_bo_va;
+ BUG_ON(!bo_va);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ return r;
+
+ f = bo_va->last_pt_update;
+ r = amdgpu_sync_fence(adev, &p->job->sync, f);
+ if (r)
+ return r;
+ }
+
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
struct dma_fence *f;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fe3bb94fe58d..6abb238b25c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -94,6 +94,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
+ if (amdgpu_sriov_runtime(adev)) {
+ BUG_ON(in_interrupt());
+ return amdgpu_virt_kiq_rreg(adev, reg);
+ }
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
else {
@@ -113,6 +118,11 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
{
trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+ if (amdgpu_sriov_runtime(adev)) {
+ BUG_ON(in_interrupt());
+ return amdgpu_virt_kiq_wreg(adev, reg, v);
+ }
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
@@ -609,25 +619,29 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
* GPU helpers function.
*/
/**
- * amdgpu_card_posted - check if the hw has already been initialized
+ * amdgpu_need_post - check whether the hw needs to be posted
*
* @adev: amdgpu_device pointer
*
- * Check if the asic has been initialized (all asics).
- * Used at driver startup.
- * Returns true if initialized or false if not.
+ * Check whether the asic needs to be posted: either it has not been
+ * initialized yet at driver startup (all asics), or a hw reset was
+ * performed and post is required.
+ * Returns true if post is needed or false if not.
*/
-bool amdgpu_card_posted(struct amdgpu_device *adev)
+bool amdgpu_need_post(struct amdgpu_device *adev)
{
uint32_t reg;
+ if (adev->has_hw_reset) {
+ adev->has_hw_reset = false;
+ return true;
+ }
/* then check MEM_SIZE, in case the crtcs are off */
reg = RREG32(mmCONFIG_MEMSIZE);
if (reg)
- return true;
+ return false;
- return false;
+ return true;
}
@@ -655,7 +669,7 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true;
}
}
- return !amdgpu_card_posted(adev);
+ return amdgpu_need_post(adev);
}
/**
@@ -885,7 +899,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
atom_card_info->ioreg_read = cail_ioreg_read;
atom_card_info->ioreg_write = cail_ioreg_write;
} else {
- DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+ DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
atom_card_info->ioreg_read = cail_reg_read;
atom_card_info->ioreg_write = cail_reg_write;
}
@@ -1131,6 +1145,18 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
return r;
}
+void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
+ adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
+ }
+}
+
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type)
{
@@ -1235,7 +1261,8 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
pciaddstr_tmp = pciaddstr;
while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
pciaddname = strsep(&pciaddname_tmp, ",");
- if (!strcmp(pci_address_name, pciaddname)) {
+ if (!strcmp("all", pciaddname)
+ || !strcmp(pci_address_name, pciaddname)) {
long num_crtc;
int res = -1;
@@ -1323,6 +1350,12 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return r;
+ }
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
@@ -1383,6 +1416,15 @@ static int amdgpu_init(struct amdgpu_device *adev)
return r;
}
adev->ip_blocks[i].status.hw = true;
+
+ /* right after GMC hw init, we create CSA */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_allocate_static_csa(adev);
+ if (r) {
+ DRM_ERROR("allocate CSA failed %d\n", r);
+ return r;
+ }
+ }
}
}
@@ -1516,6 +1558,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+ amdgpu_virt_release_full_gpu(adev, false);
+ }
+
return 0;
}
@@ -1523,6 +1570,9 @@ int amdgpu_suspend(struct amdgpu_device *adev)
{
int i, r;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
/* ungate SMC block first */
r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
AMD_CG_STATE_UNGATE);
@@ -1551,6 +1601,9 @@ int amdgpu_suspend(struct amdgpu_device *adev)
}
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
return 0;
}
@@ -1575,7 +1628,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
if (amdgpu_atombios_has_gpu_virtualization_table(adev))
- adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}
/**
@@ -1605,7 +1658,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->pdev = pdev;
adev->flags = flags;
adev->asic_type = flags & AMD_ASIC_MASK;
- adev->is_atom_bios = false;
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
adev->mc.gtt_size = 512 * 1024 * 1024;
adev->accel_working = false;
@@ -1695,7 +1747,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
if (adev->rio_mem == NULL)
- DRM_ERROR("Unable to find PCI I/O BAR\n");
+ DRM_INFO("PCI I/O BAR is not found.\n");
/* early init functions */
r = amdgpu_early_init(adev);
@@ -1720,12 +1772,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EINVAL;
goto failed;
}
- /* Must be an ATOMBIOS */
- if (!adev->is_atom_bios) {
- dev_err(adev->dev, "Expecting atombios for GPU\n");
- r = -EINVAL;
- goto failed;
- }
+
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
@@ -1852,8 +1899,6 @@ failed:
return r;
}
-static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
-
/**
* amdgpu_device_fini - tear down the driver
*
@@ -1893,7 +1938,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_BONAIRE)
amdgpu_doorbell_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
- amdgpu_debugfs_remove_files(adev);
}
@@ -2031,7 +2075,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
- if (!amdgpu_card_posted(adev) || !resume) {
+ if (amdgpu_need_post(adev)) {
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r)
DRM_ERROR("amdgpu asic init failed\n");
@@ -2252,6 +2296,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
int resched;
bool need_full_reset;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (!amdgpu_check_soft_reset(adev)) {
DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
return 0;
@@ -2507,19 +2554,6 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
return 0;
}
-static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < adev->debugfs_count; i++) {
- drm_debugfs_remove_files(adev->debugfs[i].files,
- adev->debugfs[i].num_files,
- adev->ddev->primary);
- }
-#endif
-}
-
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
@@ -2853,7 +2887,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
return -ENOMEM;
/* version, increment each time something is added */
- config[no_regs++] = 2;
+ config[no_regs++] = 3;
config[no_regs++] = adev->gfx.config.max_shader_engines;
config[no_regs++] = adev->gfx.config.max_tile_pipes;
config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -2887,6 +2921,12 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
config[no_regs++] = adev->family;
config[no_regs++] = adev->external_rev_id;
+ /* rev==3 */
+ config[no_regs++] = adev->pdev->device;
+ config[no_regs++] = adev->pdev->revision;
+ config[no_regs++] = adev->pdev->subsystem_device;
+ config[no_regs++] = adev->pdev->subsystem_vendor;
+
while (size && (*pos < no_regs * 4)) {
uint32_t value;
@@ -3153,10 +3193,6 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
{
return 0;
}
-
-void amdgpu_debugfs_cleanup(struct drm_minor *minor)
-{
-}
#else
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
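Bumping the gca_config debugfs blob to revision 3 (hunks above) appends the
four PCI identifiers after the existing dwords. A hypothetical userspace
reader — the path, buffer size and helper name are all assumptions:

	/* reads e.g. /sys/kernel/debug/dri/0/amdgpu_gca_config;
	 * needs <stdio.h>, <stdint.h> and <unistd.h> */
	static void print_pci_id(int fd)
	{
		uint32_t cfg[128];
		ssize_t n = pread(fd, cfg, sizeof(cfg), 0);
		size_t ndw = (size_t)n / 4;

		if (n > 4 && cfg[0] >= 3 && ndw >= 4)
			/* a rev-3 blob ends with device, revision,
			 * subsystem_device, subsystem_vendor */
			printf("PCI device id: 0x%04x\n", cfg[ndw - 4]);
	}

The version check in the first dword keeps blobs from older kernels from
being misparsed.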
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 581601ca6b89..39fc388f222a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -138,10 +138,52 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
kfree(work);
}
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags, uint32_t target)
+
+static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
+{
+ int i;
+
+ amdgpu_bo_unref(&work->old_abo);
+ dma_fence_put(work->excl);
+ for (i = 0; i < work->shared_count; ++i)
+ dma_fence_put(work->shared[i]);
+ kfree(work->shared);
+ kfree(work);
+}
+
+static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
+ struct amdgpu_bo *new_abo)
+{
+ amdgpu_bo_unreserve(new_abo);
+ amdgpu_flip_work_cleanup(work);
+}
+
+static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
+ struct amdgpu_bo *new_abo)
+{
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+ DRM_ERROR("failed to unpin new abo in error path\n");
+ amdgpu_flip_cleanup_unreserve(work, new_abo);
+}
+
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+ struct amdgpu_bo *new_abo)
+{
+ if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+ DRM_ERROR("failed to reserve new abo in error path\n");
+ amdgpu_flip_work_cleanup(work);
+ return;
+ }
+ amdgpu_flip_cleanup_unpin(work, new_abo);
+}
+
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ uint32_t target,
+ struct amdgpu_flip_work **work_p,
+ struct amdgpu_bo **new_abo_p)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -154,7 +196,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
unsigned long flags;
u64 tiling_flags;
u64 base;
- int i, r;
+ int r;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -189,7 +231,6 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
- r = -EINVAL;
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
@@ -216,41 +257,79 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
goto pflip_cleanup;
+
}
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ *work_p = work;
+ *new_abo_p = new_abo;
+
+ return 0;
+
+pflip_cleanup:
+ amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
+ return r;
+unpin:
+ amdgpu_flip_cleanup_unpin(work, new_abo);
+ return r;
+
+unreserve:
+ amdgpu_flip_cleanup_unreserve(work, new_abo);
+ return r;
+
+cleanup:
+ amdgpu_flip_work_cleanup(work);
+ return r;
+
+}
+
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct amdgpu_flip_work *work,
+ struct amdgpu_bo *new_abo)
+{
+ unsigned long flags;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
amdgpu_crtc->pflip_works = work;
-
- DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
- amdgpu_crtc->crtc_id, amdgpu_crtc, work);
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ DRM_DEBUG_DRIVER(
+ "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
+
amdgpu_flip_work_func(&work->flip_work.work);
- return 0;
+}
-pflip_cleanup:
- if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
- DRM_ERROR("failed to reserve new abo in error path\n");
- goto cleanup;
- }
-unpin:
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
- DRM_ERROR("failed to unpin new abo in error path\n");
- }
-unreserve:
- amdgpu_bo_unreserve(new_abo);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ uint32_t target)
+{
+ struct amdgpu_bo *new_abo;
+ struct amdgpu_flip_work *work;
+ int r;
-cleanup:
- amdgpu_bo_unref(&work->old_abo);
- dma_fence_put(work->excl);
- for (i = 0; i < work->shared_count; ++i)
- dma_fence_put(work->shared[i]);
- kfree(work->shared);
- kfree(work);
+ r = amdgpu_crtc_prepare_flip(crtc,
+ fb,
+ event,
+ page_flip_flags,
+ target,
+ &work,
+ &new_abo);
+ if (r)
+ return r;
- return r;
+ amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
+
+ return 0;
}
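+/* Net effect of the split (sketch, mirroring the wrapper above): prepare
+ * does everything that can fail and cleans up after itself on error;
+ * submit is the point of no return and cannot fail:
+ *
+ *	r = amdgpu_crtc_prepare_flip(crtc, fb, event, page_flip_flags,
+ *				     target, &work, &new_abo);
+ *	if (r)
+ *		return r;
+ *	amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
+ */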
int amdgpu_crtc_set_config(struct drm_mode_set *set)
@@ -508,7 +587,7 @@ amdgpu_framebuffer_init(struct drm_device *dev,
{
int ret;
rfb->obj = obj;
- drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret) {
rfb->obj = NULL;
@@ -582,12 +661,10 @@ int amdgpu_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
- if (adev->is_atom_bios) {
- adev->mode_info.coherent_mode_property =
- drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
- if (!adev->mode_info.coherent_mode_property)
- return -ENOMEM;
- }
+ adev->mode_info.coherent_mode_property =
+ drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+ if (!adev->mode_info.coherent_mode_property)
+ return -ENOMEM;
adev->mode_info.load_detect_property =
drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 955d6f21e2b3..fa2b55681422 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -241,13 +241,6 @@ enum amdgpu_pcie_gen {
AMDGPU_PCIE_GEN_INVALID = 0xffff
};
-enum amdgpu_dpm_forced_level {
- AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
- AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
- AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
- AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
struct amdgpu_dpm_funcs {
int (*get_temperature)(struct amdgpu_device *adev);
int (*pre_set_power_state)(struct amdgpu_device *adev);
@@ -258,7 +251,7 @@ struct amdgpu_dpm_funcs {
u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+ int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
bool (*vblank_too_short)(struct amdgpu_device *adev);
void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
@@ -353,9 +346,6 @@ struct amdgpu_dpm_funcs {
#define amdgpu_dpm_get_current_power_state(adev) \
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-#define amdgpu_dpm_get_performance_level(adev) \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
#define amdgpu_dpm_get_pp_num_states(adev, data) \
(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
@@ -393,6 +383,11 @@ struct amdgpu_dpm_funcs {
(adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
(adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+#define amdgpu_dpm_get_performance_level(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
+ (adev)->pm.dpm.forced_level)
+
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -440,7 +435,7 @@ struct amdgpu_dpm {
/* thermal handling */
struct amdgpu_dpm_thermal thermal;
/* forced levels */
- enum amdgpu_dpm_forced_level forced_level;
+ enum amd_dpm_forced_level forced_level;
};
struct amdgpu_pm {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 2534adaebe30..75fc376ba735 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,6 @@ int amdgpu_vram_page_split = 1024;
int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
-int amdgpu_powerplay = -1;
int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0;
unsigned amdgpu_pcie_gen_cap = 0;
@@ -179,9 +178,6 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
-MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
-module_param_named(powerplay, amdgpu_powerplay, int, 0444);
-
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
@@ -686,7 +682,6 @@ static struct drm_driver kms_driver = {
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
- .dev_priv_size = 0,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.preclose = amdgpu_driver_preclose_kms,
@@ -701,7 +696,6 @@ static struct drm_driver kms_driver = {
.get_scanout_position = amdgpu_get_crtc_scanoutpos,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = amdgpu_debugfs_init,
- .debugfs_cleanup = amdgpu_debugfs_cleanup,
#endif
.irq_preinstall = amdgpu_irq_preinstall,
.irq_postinstall = amdgpu_irq_postinstall,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 24629bec181a..36ce3cac81ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -245,7 +245,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "amdgpudrmfb");
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &amdgpufb_ops;
@@ -272,7 +272,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
- DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
@@ -374,7 +374,6 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
&amdgpu_fb_helper_funcs);
ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
- adev->mode_info.num_crtc,
AMDGPUFB_CONN_LIMIT);
if (ret) {
kfree(rfbdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index cd62f6ffde2a..51d759463384 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -471,12 +471,15 @@ out:
static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
{
- unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
+ if (!amdgpu_bo_gpu_accessible(bo))
+ return -ERESTARTSYS;
+
+ if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
+ return -ERESTARTSYS;
- return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+ return 0;
}
/**
@@ -484,62 +487,44 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
*
* @adev: amdgpu_device pointer
* @bo_va: bo_va to update
+ * @list: validation list
+ * @operation: map or unmap
*
- * Update the bo_va directly after setting it's address. Errors are not
+ * Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
+ struct list_head *list,
uint32_t operation)
{
- struct ttm_validate_buffer tv, *entry;
- struct amdgpu_bo_list_entry vm_pd;
- struct ww_acquire_ctx ticket;
- struct list_head list, duplicates;
- unsigned domain;
- int r;
-
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&duplicates);
-
- tv.bo = &bo_va->bo->tbo;
- tv.shared = true;
- list_add(&tv.head, &list);
-
- amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
-
- /* Provide duplicates to avoid -EALREADY */
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
- if (r)
- goto error_print;
-
- list_for_each_entry(entry, &list, head) {
- domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (domain == AMDGPU_GEM_DOMAIN_CPU)
- goto error_unreserve;
+ struct ttm_validate_buffer *entry;
+ int r = -ERESTARTSYS;
+
+ list_for_each_entry(entry, list, head) {
+ struct amdgpu_bo *bo =
+ container_of(entry->bo, struct amdgpu_bo, tbo);
+ if (amdgpu_gem_va_check(NULL, bo))
+ goto error;
}
+
r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
NULL);
if (r)
- goto error_unreserve;
+ goto error;
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
- goto error_unreserve;
+ goto error;
r = amdgpu_vm_clear_freed(adev, bo_va->vm);
if (r)
- goto error_unreserve;
+ goto error;
if (operation == AMDGPU_VA_OP_MAP)
r = amdgpu_vm_bo_update(adev, bo_va, false);
-error_unreserve:
- ttm_eu_backoff_reservation(&ticket, &list);
-
-error_print:
+error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
@@ -556,7 +541,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
- struct list_head list, duplicates;
+ struct list_head list;
uint32_t invalid_flags, va_flags = 0;
int r = 0;
@@ -594,14 +579,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&duplicates);
tv.bo = &abo->tbo;
- tv.shared = true;
+ tv.shared = false;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
drm_gem_object_unreference_unlocked(gobj);
return r;
@@ -632,10 +616,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- ttm_eu_backoff_reservation(&ticket, &list);
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
!amdgpu_vm_debug)
- amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
+ amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
+ ttm_eu_backoff_reservation(&ticket, &list);
drm_gem_object_unreference_unlocked(gobj);
return r;
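The tail of the ioctl is reordered as well: amdgpu_gem_va_update_vm() now
runs while the buffers are still reserved and the reservation is backed off
only afterwards, which is exactly what lets the helper drop its private
reserve/backoff pair. Condensed from the hunks above:

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	/* ... map or unmap the VA range ... */
	amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
	ttm_eu_backoff_reservation(&ticket, &list);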
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 01a42b6a69a4..19943356cca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -42,12 +42,12 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
int i;
- for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
- if (adev->gfx.scratch.free[i]) {
- adev->gfx.scratch.free[i] = false;
- *reg = adev->gfx.scratch.reg[i];
- return 0;
- }
+ i = ffs(adev->gfx.scratch.free_mask);
+ if (i != 0 && i <= adev->gfx.scratch.num_reg) {
+ i--;
+ adev->gfx.scratch.free_mask &= ~(1u << i);
+ *reg = adev->gfx.scratch.reg_base + i;
+ return 0;
}
return -EINVAL;
}
@@ -62,14 +62,7 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
*/
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
- int i;
-
- for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
- if (adev->gfx.scratch.reg[i] == reg) {
- adev->gfx.scratch.free[i] = true;
- return;
- }
- }
+ adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}
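The rework above replaces the per-register free[] booleans with a single
free_mask word: ffs() locates the lowest free slot in one step, allocation
clears that bit, and freeing sets it again. A self-contained user-space
sketch of the same idiom (all names illustrative):

	#include <stdio.h>
	#include <stdint.h>
	#include <strings.h>	/* ffs() */

	static uint32_t free_mask = 0x7f;	/* 7 slots, all free */

	static int scratch_get(unsigned *slot)
	{
		int i = ffs(free_mask);	/* 1-based index of lowest set bit, 0 if none */

		if (!i)
			return -1;
		free_mask &= ~(1u << (i - 1));
		*slot = i - 1;
		return 0;
	}

	static void scratch_free(unsigned slot)
	{
		free_mask |= 1u << slot;
	}

	int main(void)
	{
		unsigned slot;

		if (scratch_get(&slot) == 0) {
			printf("got slot %u\n", slot);	/* prints: got slot 0 */
			scratch_free(slot);
		}
		return 0;
	}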
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 00f46b0e076d..0335c2f331e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -97,8 +97,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
{
struct amdgpu_gtt_mgr *mgr = man->priv;
struct drm_mm_node *node = mem->mm_node;
- enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
- enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ enum drm_mm_insert_mode mode;
unsigned long fpfn, lpfn;
int r;
@@ -115,15 +114,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
else
lpfn = man->size;
- if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
- sflags = DRM_MM_SEARCH_BELOW;
- aflags = DRM_MM_CREATE_TOP;
- }
+ mode = DRM_MM_INSERT_BEST;
+ if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
+ mode = DRM_MM_INSERT_HIGH;
spin_lock(&mgr->lock);
- r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
- mem->page_alignment, 0,
- fpfn, lpfn, sflags, aflags);
+ r = drm_mm_insert_node_in_range(&mgr->mm, node,
+ mem->num_pages, mem->page_alignment, 0,
+ fpfn, lpfn, mode);
spin_unlock(&mgr->lock);
if (!r) {
@@ -235,16 +233,17 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&mgr->lock);
- drm_mm_debug_table(&mgr->mm, prefix);
+ drm_mm_print(&mgr->mm, &p);
spin_unlock(&mgr->lock);
}
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
- amdgpu_gtt_mgr_init,
- amdgpu_gtt_mgr_fini,
- amdgpu_gtt_mgr_new,
- amdgpu_gtt_mgr_del,
- amdgpu_gtt_mgr_debug
+ .init = amdgpu_gtt_mgr_init,
+ .takedown = amdgpu_gtt_mgr_fini,
+ .get_node = amdgpu_gtt_mgr_new,
+ .put_node = amdgpu_gtt_mgr_del,
+ .debug = amdgpu_gtt_mgr_debug
};
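Converting the ops table to designated initializers binds each callback by
field name, so a future reordering of struct ttm_mem_type_manager_func can no
longer silently shift functions into the wrong slots. A minimal contrast
(f1/f2 are made-up names):

	/* positional: meaning depends on member order */
	const struct ttm_mem_type_manager_func f1 = {
		amdgpu_gtt_mgr_init, amdgpu_gtt_mgr_fini, /* ... */
	};

	/* designated: order-independent, unset members default to NULL */
	const struct ttm_mem_type_manager_func f2 = {
		.init = amdgpu_gtt_mgr_init,
		.takedown = amdgpu_gtt_mgr_fini,
	};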
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 91d367399956..f2739995c335 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -231,8 +231,7 @@ void amdgpu_i2c_init(struct amdgpu_device *adev)
if (amdgpu_hw_i2c)
DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
- if (adev->is_atom_bios)
- amdgpu_atombios_i2c_init(adev);
+ amdgpu_atombios_i2c_init(adev);
}
/* remove all the buses */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 216a9572d946..e02a70dd37b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* to SI there was just a DE IB.
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
- struct amdgpu_job *job, struct dma_fence **f)
+ struct amdgpu_ib *ibs, struct amdgpu_job *job,
+ struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
@@ -175,15 +175,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
- /* always set cond_exec_polling to CONTINUE */
- *ring->cond_exe_cpu_addr = 1;
-
skip_preamble = ring->current_ctx == fence_ctx;
need_ctx_switch = ring->current_ctx != fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;
status |= job->preamble_status;
+
+ if (vm)
+ status |= AMDGPU_VM_DOMAIN;
amdgpu_ring_emit_cntxcntl(ring, status);
}
@@ -193,7 +193,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
skip_preamble &&
- !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
+ !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
+ !amdgpu_sriov_vf(adev)) /* for SR-IOV preemption, the preamble CE IB must be inserted anyway */
continue;
amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
@@ -223,7 +224,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
amdgpu_ring_patch_cond_exec(ring, patch_offset);
ring->current_ctx = fence_ctx;
- if (ring->funcs->emit_switch_buffer)
+ if (vm && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_commit(ring);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index fb902932f571..e63ece049b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -61,10 +61,8 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
struct drm_connector *connector;
mutex_lock(&mode_config->mutex);
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head)
- amdgpu_connector_hotplug(connector);
- }
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ amdgpu_connector_hotplug(connector);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index a0de6286c453..86a12424c162 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -170,8 +170,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
- job->sync.last_vm_update, job, &fence);
+ r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9af87eaf8ee3..61d94c745672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -50,16 +50,19 @@ static inline bool amdgpu_has_atpx(void) { return false; }
* This is the main unload function for KMS (all asics).
* Returns 0 on success.
*/
-int amdgpu_driver_unload_kms(struct drm_device *dev)
+void amdgpu_driver_unload_kms(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
if (adev == NULL)
- return 0;
+ return;
if (adev->rmmio == NULL)
goto done_free;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
if (amdgpu_device_is_px(dev)) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -74,7 +77,6 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
done_free:
kfree(adev);
dev->dev_private = NULL;
- return 0;
}
/**
@@ -139,6 +141,9 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
pm_runtime_put_autosuspend(dev->dev);
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, true);
+
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -570,6 +575,27 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -EINVAL;
}
}
+ case AMDGPU_INFO_NUM_HANDLES: {
+ struct drm_amdgpu_info_num_handles handle;
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_UVD:
+ /* Starting with Polaris, we support unlimited UVD handles */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ handle.uvd_max_handles = adev->uvd.max_handles;
+ handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
+
+ return copy_to_user(out, &handle,
+ min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
+ } else {
+ return -ENODATA;
+ }
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
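A hypothetical userspace consumer of the new AMDGPU_INFO_NUM_HANDLES query
(sketch only; the uapi structs come from this series and the header path may
differ on a given system):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <libdrm/amdgpu_drm.h>

	static int query_uvd_handles(int fd, struct drm_amdgpu_info_num_handles *h)
	{
		struct drm_amdgpu_info req = {0};

		req.return_pointer = (uintptr_t)h;
		req.return_size = sizeof(*h);
		req.query = AMDGPU_INFO_NUM_HANDLES;
		req.query_hw_ip.type = AMDGPU_HW_IP_UVD;

		return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req);	/* 0 on success */
	}

Note the kernel returns -ENODATA on Polaris and newer, where UVD handles are
unlimited.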
@@ -629,6 +655,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto out_suspend;
}
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_map_static_csa(adev, &fpriv->vm);
+ if (r)
+ goto out_suspend;
+ }
+
mutex_init(&fpriv->bo_list_lock);
idr_init(&fpriv->bo_list_handles);
@@ -667,6 +699,14 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_uvd_free_handles(adev, file_priv);
amdgpu_vce_free_handles(adev, file_priv);
+ if (amdgpu_sriov_vf(adev)) {
+ /* TODO: how to handle reserve failure */
+ BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+ amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
+ fpriv->vm.csa_bo_va = NULL;
+ amdgpu_bo_unreserve(adev->virt.csa_obj);
+ }
+
amdgpu_vm_fini(adev, &fpriv->vm);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 202b4176b74e..c12497bd3889 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
@@ -594,6 +595,21 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags, uint32_t target);
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+ struct amdgpu_bo *new_abo);
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ uint32_t target,
+ struct amdgpu_flip_work **work,
+ struct amdgpu_bo **new_abo);
+
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct amdgpu_flip_work *work,
+ struct amdgpu_bo *new_abo);
+
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index bf79b73e1538..be80a4a68d7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align;
+ u64 initial_bytes_moved;
size_t acc_size;
int r;
@@ -363,11 +364,33 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->flags = flags;
+#ifdef CONFIG_X86_32
+ /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ */
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+ /* Don't try to enable write-combining when it can't work, or things
+ * may be slow
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+ */
+
+#ifndef CONFIG_COMPILE_TEST
+#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+ thanks to write-combining
+#endif
+
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ "better performance thanks to write-combining\n");
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+#else
/* For architectures that don't support WC memory,
* mask out the WC flag from the BO
*/
if (!drm_arch_can_wc_memory())
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+#endif
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */
@@ -379,12 +402,25 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
WARN_ON(!locked);
}
+
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
&amdgpu_ttm_bo_destroy);
- if (unlikely(r != 0))
+ amdgpu_cs_report_moved_bytes(adev,
+ atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+
+ if (unlikely(r != 0)) {
+ if (!resv)
+ ww_mutex_unlock(&bo->tbo.resv->lock);
return r;
+ }
+
+ bo->tbo.priority = ilog2(bo->tbo.num_pages);
+ if (kernel)
+ bo->tbo.priority *= 2;
+ bo->tbo.priority = min(bo->tbo.priority, (unsigned)(TTM_MAX_BO_PRIORITY - 1));
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
@@ -408,7 +444,8 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
return 0;
fail_unreserve:
- ww_mutex_unlock(&bo->tbo.resv->lock);
+ if (!resv)
+ ww_mutex_unlock(&bo->tbo.resv->lock);
amdgpu_bo_unref(&bo);
return r;
}
@@ -472,7 +509,16 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
+ if (!resv) {
+ r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
+ WARN_ON(r != 0);
+ }
+
r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+
+ if (!resv)
+ ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);
+
if (r)
amdgpu_bo_unref(bo_ptr);
}
@@ -849,6 +895,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
}
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
@@ -861,6 +908,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
amdgpu_vm_bo_invalidate(adev, abo);
+ /* remember the eviction */
+ if (evict)
+ atomic64_inc(&adev->num_evictions);
+
/* update statistics */
if (!new_mem)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5cbf59ec0f68..15a723adca76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -114,6 +114,15 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
+/**
+ * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
+ * is accessible to the GPU.
+ */
+static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
+{
+ return bo->tbo.mem.mem_type != TTM_PL_SYSTEM;
+}
+
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
@@ -155,7 +164,8 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem);
+ bool evict,
+ struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 723ae682bf25..346e80a7119b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -34,6 +34,28 @@
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
+static const struct cg_flag_name clocks[] = {
+ {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
+ {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
+ {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
+ {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
+ {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
+ {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
+ {0, NULL},
+};
+
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
if (adev->pp_enabled)
@@ -112,28 +134,23 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ enum amd_dpm_forced_level level;
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
- if (adev->pp_enabled) {
- enum amd_dpm_forced_level level;
-
- level = amdgpu_dpm_get_performance_level(adev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
- (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
- (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
- (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
- } else {
- enum amdgpu_dpm_forced_level level;
-
- level = adev->pm.dpm.forced_level;
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
- (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
- }
+ level = amdgpu_dpm_get_performance_level(adev);
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+ (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+ (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+ (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+ "unknown");
}
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
@@ -143,7 +160,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- enum amdgpu_dpm_forced_level level;
+ enum amd_dpm_forced_level level;
+ enum amd_dpm_forced_level current_level;
int ret = 0;
/* Can't force performance level when the card is off */
@@ -151,19 +169,34 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
+ current_level = amdgpu_dpm_get_performance_level(adev);
+
if (strncmp("low", buf, strlen("low")) == 0) {
- level = AMDGPU_DPM_FORCED_LEVEL_LOW;
+ level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
- level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
+ level = AMD_DPM_FORCED_LEVEL_HIGH;
} else if (strncmp("auto", buf, strlen("auto")) == 0) {
- level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ level = AMD_DPM_FORCED_LEVEL_AUTO;
} else if (strncmp("manual", buf, strlen("manual")) == 0) {
- level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
- } else {
+ level = AMD_DPM_FORCED_LEVEL_MANUAL;
+ } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
+ } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
+ } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
+ } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
+ } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ } else {
count = -EINVAL;
goto fail;
}
+ if (current_level == level)
+ return count;
+
if (adev->pp_enabled)
amdgpu_dpm_force_performance_level(adev, level);
else {
@@ -180,6 +213,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
adev->pm.dpm.forced_level = level;
mutex_unlock(&adev->pm.mutex);
}
+
fail:
return count;
}
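With the extra keywords accepted above, the sysfs knob now understands the
profile levels as well, e.g. (card index illustrative):

	echo profile_peak > /sys/class/drm/card0/device/power_dpm_force_performance_level

and the new current_level short-circuit makes rewriting the already-active
level a no-op.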
@@ -1060,9 +1094,9 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
- enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
+ enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
/* force low perf level for thermal */
- amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
+ amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
/* save the user's level */
adev->pm.dpm.forced_level = level;
} else {
@@ -1108,12 +1142,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
+ amdgpu_pm_compute_clocks(adev);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
} else {
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = false;
mutex_unlock(&adev->pm.mutex);
+ amdgpu_pm_compute_clocks(adev);
}
- amdgpu_pm_compute_clocks(adev);
+
}
}
@@ -1252,7 +1296,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
if (!adev->pm.dpm_enabled)
return;
- amdgpu_display_bandwidth_update(adev);
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -1351,12 +1396,27 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
return 0;
}
+static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
+{
+ int i;
+
+ for (i = 0; clocks[i].flag; i++)
+ seq_printf(m, "\t%s: %s\n", clocks[i].name,
+ (flags & clocks[i].flag) ? "On" : "Off");
+}
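+/* Resulting amdgpu_pm_info excerpt (values invented for illustration;
+ * 0x3 = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS):
+ *
+ *	Clock Gating Flags Mask: 0x3
+ *		Graphics Medium Grain Clock Gating: On
+ *		Graphics Medium Grain memory Light Sleep: On
+ *		Graphics Coarse Grain Clock Gating: Off
+ *		...
+ */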
+
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
struct drm_device *ddev = adev->ddev;
+ u32 flags = 0;
+
+ amdgpu_get_clockgating_state(adev, &flags);
+ seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
+ amdgpu_parse_cg_state(m, flags);
+ seq_printf(m, "\n");
if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 5fd7734f15ca..c19c4d138751 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -24,6 +24,12 @@
#ifndef __AMDGPU_PM_H__
#define __AMDGPU_PM_H__
+struct cg_flag_name
+{
+ u32 flag;
+ const char *name;
+};
+
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 95a568df8551..8856eccc37fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -34,67 +34,34 @@
#include "cik_dpm.h"
#include "vi_dpm.h"
-static int amdgpu_powerplay_init(struct amdgpu_device *adev)
+static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
{
- int ret = 0;
+ struct amd_pp_init pp_init;
struct amd_powerplay *amd_pp;
+ int ret;
amd_pp = &(adev->powerplay);
-
- if (adev->pp_enabled) {
- struct amd_pp_init *pp_init;
-
- pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);
-
- if (pp_init == NULL)
- return -ENOMEM;
-
- pp_init->chip_family = adev->family;
- pp_init->chip_id = adev->asic_type;
- pp_init->device = amdgpu_cgs_create_device(adev);
- ret = amd_powerplay_init(pp_init, amd_pp);
- kfree(pp_init);
- } else {
- amd_pp->pp_handle = (void *)adev;
-
- switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_SI
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- amd_pp->ip_funcs = &si_dpm_ip_funcs;
- break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- amd_pp->ip_funcs = &ci_dpm_ip_funcs;
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- case CHIP_KAVERI:
- amd_pp->ip_funcs = &kv_dpm_ip_funcs;
- break;
-#endif
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- amd_pp->ip_funcs = &cz_dpm_ip_funcs;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- }
- return ret;
+ pp_init.chip_family = adev->family;
+ pp_init.chip_id = adev->asic_type;
+ pp_init.pm_en = amdgpu_dpm != 0 ? true : false;
+ pp_init.feature_mask = amdgpu_pp_feature_mask;
+ pp_init.device = amdgpu_cgs_create_device(adev);
+ ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
+ if (ret)
+ return -EINVAL;
+ return 0;
}
static int amdgpu_pp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amd_powerplay *amd_pp;
int ret = 0;
+ amd_pp = &(adev->powerplay);
+ adev->pp_enabled = false;
+ amd_pp->pp_handle = (void *)adev;
+
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
@@ -102,30 +69,48 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_TOPAZ:
- adev->pp_enabled = true;
- break;
case CHIP_CARRIZO:
case CHIP_STONEY:
- adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+ adev->pp_enabled = true;
+ if (amdgpu_create_pp_handle(adev))
+ return -EINVAL;
+ amd_pp->ip_funcs = &pp_ip_funcs;
+ amd_pp->pp_funcs = &pp_dpm_funcs;
break;
/* These chips don't have powerplay implementations */
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ amd_pp->ip_funcs = &si_dpm_ip_funcs;
+ break;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
+ amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+ break;
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
+ amd_pp->ip_funcs = &kv_dpm_ip_funcs;
+ break;
+#endif
default:
- adev->pp_enabled = false;
+ ret = -EINVAL;
break;
}
- ret = amdgpu_powerplay_init(adev);
- if (ret)
- return ret;
-
if (adev->powerplay.ip_funcs->early_init)
ret = adev->powerplay.ip_funcs->early_init(
adev->powerplay.pp_handle);
+
+ if (ret == PP_DPM_DISABLED) {
+ adev->pm.dpm_enabled = false;
+ return 0;
+ }
return ret;
}
@@ -185,6 +170,11 @@ static int amdgpu_pp_hw_init(void *handle)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
+ if (ret == PP_DPM_DISABLED) {
+ adev->pm.dpm_enabled = false;
+ return 0;
+ }
+
if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
adev->pm.dpm_enabled = true;
@@ -210,14 +200,14 @@ static void amdgpu_pp_late_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pp_enabled) {
- amdgpu_pm_sysfs_fini(adev);
- amd_powerplay_fini(adev->powerplay.pp_handle);
- }
-
if (adev->powerplay.ip_funcs->late_fini)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
+
+ if (adev->pp_enabled && adev->pm.dpm_enabled)
+ amdgpu_pm_sysfs_fini(adev);
+
+ amd_powerplay_destroy(adev->powerplay.pp_handle);
}
static int amdgpu_pp_suspend(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a47628395914..7c842b7f1004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -207,6 +207,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
+ /* always set cond_exec_polling to CONTINUE */
+ *ring->cond_exe_cpu_addr = 1;
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
@@ -307,7 +309,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
while (size) {
if (*pos >= (ring->ring_size + 12))
return result;
-
+
value = ring->ring[(*pos - 12)/4];
r = put_user(value, (uint32_t*)buf);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 574f0b79c690..2345b39878c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -135,6 +135,8 @@ struct amdgpu_ring_funcs {
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+ void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
+ void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
};
struct amdgpu_ring {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index bb964a8ff938..a18ae1e97860 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -24,7 +24,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
__entry->reg = reg;
__entry->value = value;
),
- TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
(unsigned long)__entry->did,
(unsigned long)__entry->reg,
(unsigned long)__entry->value)
@@ -43,7 +43,7 @@ TRACE_EVENT(amdgpu_mm_wreg,
__entry->reg = reg;
__entry->value = value;
),
- TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
(unsigned long)__entry->did,
(unsigned long)__entry->reg,
(unsigned long)__entry->value)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8e35c1ff59e3..4c6094eefc51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -466,10 +466,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
adev = amdgpu_ttm_adev(bo->bdev);
- /* remember the eviction */
- if (evict)
- atomic64_inc(&adev->num_evictions);
-
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
amdgpu_move_null(bo, new_mem);
return 0;
@@ -533,6 +529,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_TT:
break;
case TTM_PL_VRAM:
+ if (mem->start == AMDGPU_BO_INVALID_OFFSET)
+ return -EINVAL;
+
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
@@ -552,6 +551,8 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
mem->bus.addr =
ioremap_nocache(mem->bus.base + mem->bus.offset,
mem->bus.size);
+ if (!mem->bus.addr)
+ return -ENOMEM;
/*
* Alpha: Use just the bus offset plus
@@ -1052,56 +1053,6 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
return flags;
}
-static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- unsigned i, j;
-
- for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
- struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
-
- for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
- if (&tbo->lru == lru->lru[j])
- lru->lru[j] = tbo->lru.prev;
-
- if (&tbo->swap == lru->swap_lru)
- lru->swap_lru = tbo->swap.prev;
- }
-}
-
-static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- unsigned log2_size = min(ilog2(tbo->num_pages),
- AMDGPU_TTM_LRU_SIZE - 1);
-
- return &adev->mman.log2_size[log2_size];
-}
-
-static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
-{
- struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
- struct list_head *res = lru->lru[tbo->mem.mem_type];
-
- lru->lru[tbo->mem.mem_type] = &tbo->lru;
- while ((++lru)->lru[tbo->mem.mem_type] == res)
- lru->lru[tbo->mem.mem_type] = &tbo->lru;
-
- return res;
-}
-
-static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
-{
- struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
- struct list_head *res = lru->swap_lru;
-
- lru->swap_lru = &tbo->swap;
- while ((++lru)->swap_lru == res)
- lru->swap_lru = &tbo->swap;
-
- return res;
-}
-
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
@@ -1140,14 +1091,10 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
- .lru_removal = &amdgpu_ttm_lru_removal,
- .lru_tail = &amdgpu_ttm_lru_tail,
- .swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
- unsigned i, j;
int r;
r = amdgpu_ttm_global_init(adev);
@@ -1165,19 +1112,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
-
- for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
- struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
-
- for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
- lru->lru[j] = &adev->mman.bdev.man[j].lru;
- lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
- }
-
- for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
- adev->mman.guard.lru[j] = NULL;
- adev->mman.guard.swap_lru = NULL;
-
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -1365,7 +1299,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
WARN_ON(job->ibs[0].length_dw > num_dw);
if (direct_submit) {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
- NULL, NULL, fence);
+ NULL, fence);
job->fence = dma_fence_get(*fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
@@ -1482,18 +1416,18 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
- int ret;
struct ttm_bo_global *glob = adev->mman.bdev.glob;
+ struct drm_printer p = drm_seq_file_printer(m);
spin_lock(&glob->lru_lock);
- ret = drm_mm_dump_table(m, mm);
+ drm_mm_print(mm, &p);
spin_unlock(&glob->lru_lock);
if (ttm_pl == TTM_PL_VRAM)
seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
adev->mman.bdev.man[ttm_pl].size,
(u64)atomic64_read(&adev->vram_usage) >> 20,
(u64)atomic64_read(&adev->vram_vis_usage) >> 20);
- return ret;
+ return 0;
}
static int ttm_pl_vram = TTM_PL_VRAM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 98ee384f0fca..6bdede8ff12b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -34,13 +34,6 @@
#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)
-#define AMDGPU_TTM_LRU_SIZE 20
-
-struct amdgpu_mman_lru {
- struct list_head *lru[TTM_NUM_MEM_TYPES];
- struct list_head *swap_lru;
-};
-
struct amdgpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
@@ -58,11 +51,6 @@ struct amdgpu_mman {
struct amdgpu_ring *buffer_funcs_ring;
/* Scheduler entity for buffer moves */
struct amd_sched_entity entity;
-
- /* custom LRU management */
- struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
- /* guard for log2_size array, don't add anything in between */
- struct amdgpu_mman_lru guard;
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 1d564beb0fde..6d6ab7f11b4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -976,7 +976,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
job->fence = dma_fence_get(f);
if (r)
goto err_free;
@@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_uvd(adev, false);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ /* shutdown the UVD block */
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_uvd(adev, true);
} else {
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
}
@@ -1178,3 +1187,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
return r;
}
+
+/**
+ * amdgpu_uvd_used_handles - returns used UVD handles
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the number of UVD handles in use
+ */
+uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
+{
+ unsigned i;
+ uint32_t used_handles = 0;
+
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
+ /*
+ * Handles can be freed in any order, and not
+ * necessarily linear. So we need to count
+ * all non-zero handles.
+ */
+ if (atomic_read(&adev->uvd.handles[i]))
+ used_handles++;
+ }
+
+ return used_handles;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 6249ba1bde2a..c10682baccae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -38,5 +38,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 8fec802d3908..e2c06780ce49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
}
}
mutex_unlock(&adev->vce.idle_mutex);
@@ -455,7 +464,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
job->fence = dma_fence_get(f);
if (r)
goto err;
@@ -518,7 +527,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
job->fence = dma_fence_get(f);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
new file mode 100644
index 000000000000..dcfb7df3caf4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
+{
+ int r;
+ void *ptr;
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
+ &adev->virt.csa_vmid0_addr, &ptr);
+ if (r)
+ return r;
+
+ memset(ptr, 0, AMDGPU_CSA_SIZE);
+ return 0;
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init;
+ * it maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
+ * into this VM, and every GFX command submission should use this
+ * virtual address in its META_DATA init package to support SR-IOV
+ * GFX preemption.
+ */
+
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ int r;
+ struct amdgpu_bo_va *bo_va;
+ struct ww_acquire_ctx ticket;
+ struct list_head list;
+ struct amdgpu_bo_list_entry pd;
+ struct ttm_validate_buffer csa_tv;
+
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&csa_tv.head);
+ csa_tv.bo = &adev->virt.csa_obj->tbo;
+ csa_tv.shared = true;
+
+ list_add(&csa_tv.head, &list);
+ amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ if (r) {
+ DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ return r;
+ }
+
+ bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+ if (!bo_va) {
+ ttm_eu_backoff_reservation(&ticket, &list);
+ DRM_ERROR("failed to create bo_va for static CSA\n");
+ return -ENOMEM;
+ }
+
+ r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
+
+ if (r) {
+ DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+ amdgpu_vm_bo_rmv(adev, bo_va);
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return r;
+ }
+
+ vm->csa_bo_va = bo_va;
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return 0;
+}
+
+void amdgpu_virt_init_setting(struct amdgpu_device *adev)
+{
+ /* enable virtual display */
+ adev->mode_info.num_crtc = 1;
+ adev->enable_virtual_display = true;
+
+ mutex_init(&adev->virt.lock);
+}
+
+uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+ signed long r;
+ uint32_t val;
+ struct dma_fence *f;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ mutex_lock(&adev->virt.lock);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_hdp_flush(ring);
+ amdgpu_ring_emit_rreg(ring, reg);
+ amdgpu_ring_emit_hdp_invalidate(ring);
+ amdgpu_fence_emit(ring, &f);
+ amdgpu_ring_commit(ring);
+ mutex_unlock(&adev->virt.lock);
+
+ r = dma_fence_wait(f, false);
+ if (r)
+ DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ dma_fence_put(f);
+
+ val = adev->wb.wb[adev->virt.reg_val_offs];
+
+ return val;
+}
+
+void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+ signed long r;
+ struct dma_fence *f;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_wreg);
+
+ mutex_lock(&adev->virt.lock);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_hdp_flush(ring);
+ amdgpu_ring_emit_wreg(ring, reg, v);
+ amdgpu_ring_emit_hdp_invalidate(ring);
+ amdgpu_fence_emit(ring, &f);
+ amdgpu_ring_commit(ring);
+ mutex_unlock(&adev->virt.lock);
+
+ r = dma_fence_wait(f, false);
+ if (r)
+ DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ dma_fence_put(f);
+}
+
+/**
+ * amdgpu_virt_request_full_gpu() - request full gpu access
+ * @adev: amdgpu device.
+ * @init: true when called from driver init or fini.
+ * Full GPU access must be requested before starting driver init or fini.
+ * Return: Zero on success, error code otherwise.
+ */
+int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r;
+
+ if (virt->ops && virt->ops->req_full_gpu) {
+ r = virt->ops->req_full_gpu(adev, init);
+ if (r)
+ return r;
+
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_virt_release_full_gpu() - release full gpu access
+ * @adev: amdgpu device.
+ * @init: true when called from driver init or fini.
+ * Full GPU access must be released once driver init or fini completes.
+ * Return: Zero on success, error code otherwise.
+ */
+int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r;
+
+ if (virt->ops && virt->ops->rel_full_gpu) {
+ r = virt->ops->rel_full_gpu(adev, init);
+ if (r)
+ return r;
+
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_virt_reset_gpu() - reset gpu
+ * @adev: amdgpu device.
+ * Send a reset command to the GPU hypervisor to reset the GPU that
+ * the VM is using.
+ * Return: Zero on success, error code otherwise.
+ */
+int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r;
+
+ if (virt->ops && virt->ops->reset_gpu) {
+ r = virt->ops->reset_gpu(adev);
+ if (r)
+ return r;
+
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ }
+
+ return 0;
+}
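
A rough sketch of how these helpers are meant to compose; the function
names and the init flow here are illustrative, not taken from this
series. Full access brackets driver init/fini, and once the VF drops
back to runtime mode, register traffic goes through the KIQ helpers
above instead of direct MMIO.

/* Illustrative only: bracket hardware init with full-access
 * request/release. */
static int example_vf_hw_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_request_full_gpu(adev, true);
	if (r)
		return r;

	/* ... direct register programming while full access is held ... */

	return amdgpu_virt_release_full_gpu(adev, true);
}

/* Illustrative only: fall back to KIQ-based access in runtime mode,
 * where direct MMIO is unsafe. */
static uint32_t example_vf_mmio_read(struct amdgpu_device *adev, uint32_t reg)
{
	if (amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);
	return RREG32(reg);
}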
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 2c37a374917f..675e12c42532 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -28,22 +28,48 @@
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
+#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* VF is out of full-access mode */
+
+/**
+ * struct amdgpu_virt_ops - amdgpu device virt operations
+ */
+struct amdgpu_virt_ops {
+ int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
+ int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+ int (*reset_gpu)(struct amdgpu_device *adev);
+};
+
/* GPU virtualization */
-struct amdgpu_virtualization {
- uint32_t virtual_caps;
+struct amdgpu_virt {
+ uint32_t caps;
+ struct amdgpu_bo *csa_obj;
+ uint64_t csa_vmid0_addr;
+ bool chained_ib_support;
+ uint32_t reg_val_offs;
+ struct mutex lock;
+ struct amdgpu_irq_src ack_irq;
+ struct amdgpu_irq_src rcv_irq;
+ struct delayed_work flr_work;
+ const struct amdgpu_virt_ops *ops;
};
+#define AMDGPU_CSA_SIZE (8 * 1024)
+#define AMDGPU_CSA_VADDR (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)
+
#define amdgpu_sriov_enabled(adev) \
-((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
+((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
#define amdgpu_sriov_vf(adev) \
-((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
+((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)
#define amdgpu_sriov_bios(adev) \
-((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+
+#define amdgpu_sriov_runtime(adev) \
+((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
#define amdgpu_passthrough(adev) \
-((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
static inline bool is_virtual_machine(void)
{
@@ -54,4 +80,14 @@ static inline bool is_virtual_machine(void)
#endif
}
-#endif \ No newline at end of file
+struct amdgpu_vm;
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_virt_init_setting(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
+int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
+int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+
+#endif
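
The ops table keeps the hypervisor mailbox details out of common code;
a backend only has to supply the three callbacks. A minimal
hypothetical skeleton (every xgpu_example_* name below is invented for
illustration, not a symbol from this series):

/* Hypothetical backend skeleton. */
static int xgpu_example_req_full_gpu(struct amdgpu_device *adev, bool init)
{
	/* send a request-GPU message to the hypervisor mailbox, wait for ack */
	return 0;
}

static int xgpu_example_rel_full_gpu(struct amdgpu_device *adev, bool init)
{
	/* notify the hypervisor that exclusive access is no longer needed */
	return 0;
}

static int xgpu_example_reset_gpu(struct amdgpu_device *adev)
{
	/* ask the hypervisor to reset the GPU backing this VF */
	return 0;
}

const struct amdgpu_virt_ops xgpu_example_virt_ops = {
	.req_full_gpu	= xgpu_example_req_full_gpu,
	.rel_full_gpu	= xgpu_example_rel_full_gpu,
	.reset_gpu	= xgpu_example_reset_gpu,
};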
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1dda9321bd5a..bd0d33125c18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1293,7 +1293,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint32_t flags)
+ uint64_t size, uint64_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->vm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index adbc2f5e5c7f..18c72c0b478d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -111,6 +111,8 @@ struct amdgpu_vm {
/* client id */
u64 client_id;
+ /* each VM maps the CSA */
+ struct amdgpu_bo_va *csa_bo_va;
};
struct amdgpu_vm_id {
@@ -195,7 +197,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint32_t flags);
+ uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
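
Widening the flags parameter to 64 bits lets callers pass a full PTE
flag word. A sketch of a call site against the new prototype (an
illustrative wrapper; the PTE bits and CSA constants appear elsewhere
in this series):

/* Illustrative only: flags is now a full 64-bit PTE flag word. */
static int example_map_csa_range(struct amdgpu_device *adev,
				 struct amdgpu_bo_va *bo_va)
{
	uint64_t flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			 AMDGPU_PTE_EXECUTABLE;

	return amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,
				AMDGPU_CSA_SIZE, flags);
}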
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index d710226a0fff..9e577e3d3147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,8 +97,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
- enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
- enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
unsigned i;
int r;
@@ -121,10 +120,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!nodes)
return -ENOMEM;
- if (place->flags & TTM_PL_FLAG_TOPDOWN) {
- sflags = DRM_MM_SEARCH_BELOW;
- aflags = DRM_MM_CREATE_TOP;
- }
+ mode = DRM_MM_INSERT_BEST;
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mode = DRM_MM_INSERT_HIGH;
pages_left = mem->num_pages;
@@ -135,13 +133,11 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (pages == pages_per_node)
alignment = pages_per_node;
- else
- sflags |= DRM_MM_SEARCH_BEST;
- r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
- alignment, 0,
- place->fpfn, lpfn,
- sflags, aflags);
+ r = drm_mm_insert_node_in_range(mm, &nodes[i],
+ pages, alignment, 0,
+ place->fpfn, lpfn,
+ mode);
if (unlikely(r))
goto error;
@@ -207,9 +203,10 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&mgr->lock);
- drm_mm_debug_table(&mgr->mm, prefix);
+ drm_mm_print(&mgr->mm, &p);
spin_unlock(&mgr->lock);
}
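
This hunk follows the drm_mm rework: the old search/creation flag pair
collapses into a single enum drm_mm_insert_mode, and debug dumping goes
through a drm_printer. A minimal sketch of the new insertion call
(assuming an already initialized struct drm_mm; the wrapper name is
illustrative):

/* DRM_MM_SEARCH_BELOW + DRM_MM_CREATE_TOP becomes DRM_MM_INSERT_HIGH. */
static int example_alloc_topdown(struct drm_mm *mm, struct drm_mm_node *node,
				 u64 pages, u64 start, u64 end)
{
	return drm_mm_insert_node_in_range(mm, node, pages,
					   0 /* alignment */, 0 /* color */,
					   start, end, DRM_MM_INSERT_HIGH);
}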
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index c32eca26155c..2af26d2da127 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -181,9 +181,6 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
if (!amdgpu_encoder->enc_priv)
return;
- if (!adev->is_atom_bios)
- return;
-
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
@@ -236,9 +233,6 @@ amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
if (!amdgpu_encoder->enc_priv)
return;
- if (!adev->is_atom_bios)
- return;
-
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index bda9e3de191e..f97ecb49972e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -889,7 +889,16 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
pi->uvd_power_gated = gate;
- ci_update_uvd_dpm(adev, gate);
+ if (gate) {
+ /* stop the UVD block */
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ ci_update_uvd_dpm(adev, gate);
+ } else {
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
+ ci_update_uvd_dpm(adev, gate);
+ }
}
static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
@@ -2201,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev)
static int ci_upload_firmware(struct amdgpu_device *adev)
{
- struct ci_power_info *pi = ci_get_pi(adev);
int i, ret;
if (amdgpu_ci_is_smc_running(adev)) {
@@ -2218,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
amdgpu_ci_stop_smc_clock(adev);
amdgpu_ci_reset_smc(adev);
- ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
+ ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
return ret;
@@ -4248,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
if (amdgpu_new_state->evclk) {
- /* turn the clocks on when encoding */
- ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- if (ret)
- return ret;
-
pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
tmp = RREG32_SMC(ixDPM_TABLE_475);
tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
@@ -4265,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
ret = ci_enable_vce_dpm(adev, false);
if (ret)
return ret;
- /* turn the clocks off when not encoding */
- ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
}
}
return ret;
@@ -4336,13 +4335,13 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
- enum amdgpu_dpm_forced_level level)
+ enum amd_dpm_forced_level level)
{
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp, levels, i;
int ret;
- if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+ if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
if ((!pi->pcie_dpm_key_disabled) &&
pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
levels = 0;
@@ -4403,7 +4402,7 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
}
}
}
- } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+ } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
if ((!pi->sclk_dpm_key_disabled) &&
pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
levels = ci_get_lowest_enabled_level(adev,
@@ -4452,7 +4451,7 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
udelay(1);
}
}
- } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+ } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
if (!pi->pcie_dpm_key_disabled) {
PPSMC_Result smc_result;
@@ -6262,20 +6261,20 @@ static int ci_dpm_sw_init(void *handle)
/* default to balanced state */
adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
adev->pm.default_sclk = adev->clock.default_sclk;
adev->pm.default_mclk = adev->clock.default_mclk;
adev->pm.current_sclk = adev->clock.default_sclk;
adev->pm.current_mclk = adev->clock.default_mclk;
adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
- if (amdgpu_dpm == 0)
- return 0;
-
ret = ci_dpm_init_microcode(adev);
if (ret)
return ret;
+ if (amdgpu_dpm == 0)
+ return 0;
+
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
mutex_lock(&adev->pm.mutex);
ret = ci_dpm_init(adev);
@@ -6319,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!amdgpu_dpm)
+ if (!amdgpu_dpm) {
+ ret = ci_upload_firmware(adev);
+ if (ret) {
+ DRM_ERROR("ci_upload_firmware failed\n");
+ return ret;
+ }
+ ci_dpm_start_smc(adev);
return 0;
+ }
mutex_lock(&adev->pm.mutex);
ci_dpm_setup_asic(adev);
@@ -6342,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle)
mutex_lock(&adev->pm.mutex);
ci_dpm_disable(adev);
mutex_unlock(&adev->pm.mutex);
+ } else {
+ ci_dpm_stop_smc(adev);
}
return 0;
@@ -6571,8 +6579,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
{
struct ci_power_info *pi = ci_get_pi(adev);
- if (adev->pm.dpm.forced_level
- != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
+ if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ AMD_DPM_FORCED_LEVEL_LOW |
+ AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
switch (type) {
@@ -6739,12 +6748,3 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}
-
-const struct amdgpu_ip_block_version ci_dpm_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &ci_dpm_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 302df85893ab..c4d4b35e54ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1176,6 +1176,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
/* enable BM */
pci_set_master(adev->pdev);
+ adev->has_hw_reset = true;
r = 0;
break;
}
@@ -1627,14 +1628,13 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
{
if (is_virtual_machine()) /* passthrough mode */
- adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
.read_bios_from_rom = &cik_read_bios_from_rom,
- .detect_hw_virtualization = cik_detect_hw_virtualization,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
.set_vga_state = &cik_vga_set_state,
@@ -1723,8 +1723,8 @@ static int cik_common_early_init(void *handle)
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG |*/
AMD_PG_SUPPORT_UVD |
- /*AMD_PG_SUPPORT_VCE |
- AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_VCE |
+ /* AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_ACP |
@@ -1890,6 +1890,8 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
+ cik_detect_hw_virtualization(adev);
+
switch (adev->asic_type) {
case CHIP_BONAIRE:
amdgpu_ip_block_add(adev, &cik_common_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 4c34dbc7a254..810bba533975 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -651,7 +651,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
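
Every call site in this series drops one NULL argument from
amdgpu_ib_schedule(); as inferred from these hunks, the remaining
parameters are the ring, the IB count, the IB array, an optional job
and the returned fence. An illustrative direct-submission wrapper
(the function name is invented for this sketch):

/* Illustrative only: direct submission with no amdgpu_job attached;
 * the scheduled fence comes back in f. */
static int example_direct_submit(struct amdgpu_ring *ring,
				 struct amdgpu_ib *ib)
{
	struct dma_fence *f = NULL;
	int r;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	if (!r)
		dma_fence_put(f);
	return r;
}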
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
index 66e39cdb5cb0..66e39cdb5cb0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
deleted file mode 100644
index ba2b66be9022..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ /dev/null
@@ -1,2320 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include <linux/seq_file.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_atombios.h"
-#include "vid.h"
-#include "vi_dpm.h"
-#include "amdgpu_dpm.h"
-#include "cz_dpm.h"
-#include "cz_ppsmc.h"
-#include "atom.h"
-
-#include "smu/smu_8_0_d.h"
-#include "smu/smu_8_0_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "bif/bif_5_1_d.h"
-#include "gfx_v8_0.h"
-
-static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
-static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
-static void cz_dpm_fini(struct amdgpu_device *adev);
-
-static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
-{
- struct cz_ps *ps = rps->ps_priv;
-
- return ps;
-}
-
-static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
- uint16_t voltage)
-{
- uint16_t tmp = 6200 - voltage * 25;
-
- return tmp;
-}
-
-static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
- struct amdgpu_clock_and_voltage_limits *table)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *dep_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (dep_table->count > 0) {
- table->sclk = dep_table->entries[dep_table->count - 1].clk;
- table->vddc = cz_convert_8bit_index_to_voltage(adev,
- dep_table->entries[dep_table->count - 1].v);
- }
-
- table->mclk = pi->sys_info.nbp_memory_clock[0];
-
-}
-
-union igp_info {
- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
-};
-
-static int cz_parse_sys_info_table(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
- union igp_info *igp_info;
- u8 frev, crev;
- u16 data_offset;
- int i = 0;
-
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- igp_info = (union igp_info *)(mode_info->atom_context->bios +
- data_offset);
-
- if (crev != 9) {
- DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
- return -EINVAL;
- }
- pi->sys_info.bootup_sclk =
- le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
- pi->sys_info.bootup_uma_clk =
- le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
- pi->sys_info.dentist_vco_freq =
- le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
- pi->sys_info.bootup_nb_voltage_index =
- le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);
-
- if (igp_info->info_9.ucHtcTmpLmt == 0)
- pi->sys_info.htc_tmp_lmt = 203;
- else
- pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;
-
- if (igp_info->info_9.ucHtcHystLmt == 0)
- pi->sys_info.htc_hyst_lmt = 5;
- else
- pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;
-
- if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
- DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
- return -EINVAL;
- }
-
- if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
- pi->enable_nb_ps_policy)
- pi->sys_info.nb_dpm_enable = true;
- else
- pi->sys_info.nb_dpm_enable = false;
-
- for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
- if (i < CZ_NUM_NBPMEMORY_CLOCK)
- pi->sys_info.nbp_memory_clock[i] =
- le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
- pi->sys_info.nbp_n_clock[i] =
- le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
- }
-
- for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
- pi->sys_info.display_clock[i] =
- le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
-
- for (i = 0; i < CZ_NUM_NBPSTATES; i++)
- pi->sys_info.nbp_voltage_index[i] =
- le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);
-
- if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
- pi->caps_enable_dfs_bypass = true;
-
- pi->sys_info.uma_channel_number =
- igp_info->info_9.ucUMAChannelNumber;
-
- cz_construct_max_power_limits_table(adev,
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
- }
-
- return 0;
-}
-
-static void cz_patch_voltage_values(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table *acp_table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- if (uvd_table->count) {
- for (i = 0; i < uvd_table->count; i++)
- uvd_table->entries[i].v =
- cz_convert_8bit_index_to_voltage(adev,
- uvd_table->entries[i].v);
- }
-
- if (vce_table->count) {
- for (i = 0; i < vce_table->count; i++)
- vce_table->entries[i].v =
- cz_convert_8bit_index_to_voltage(adev,
- vce_table->entries[i].v);
- }
-
- if (acp_table->count) {
- for (i = 0; i < acp_table->count; i++)
- acp_table->entries[i].v =
- cz_convert_8bit_index_to_voltage(adev,
- acp_table->entries[i].v);
- }
-
-}
-
-static void cz_construct_boot_state(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
- pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
- pi->boot_pl.ds_divider_index = 0;
- pi->boot_pl.ss_divider_index = 0;
- pi->boot_pl.allow_gnb_slow = 1;
- pi->boot_pl.force_nbp_state = 0;
- pi->boot_pl.display_wm = 0;
- pi->boot_pl.vce_wm = 0;
-
-}
-
-static void cz_patch_boot_state(struct amdgpu_device *adev,
- struct cz_ps *ps)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- ps->num_levels = 1;
- ps->levels[0] = pi->boot_pl;
-}
-
-union pplib_clock_info {
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
- struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
- struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
-};
-
-static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps, int index,
- union pplib_clock_info *clock_info)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct cz_ps *ps = cz_get_ps(rps);
- struct cz_pl *pl = &ps->levels[index];
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- pl->sclk = table->entries[clock_info->carrizo.index].clk;
- pl->vddc_index = table->entries[clock_info->carrizo.index].v;
-
- ps->num_levels = index + 1;
-
- if (pi->caps_sclk_ds) {
- pl->ds_divider_index = 5;
- pl->ss_divider_index = 5;
- }
-
-}
-
-static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps,
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
- u8 table_rev)
-{
- struct cz_ps *ps = cz_get_ps(rps);
-
- rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- rps->class = le16_to_cpu(non_clock_info->usClassification);
- rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
-
- if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
- rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
- rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else {
- rps->vclk = 0;
- rps->dclk = 0;
- }
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- adev->pm.dpm.boot_ps = rps;
- cz_patch_boot_state(adev, ps);
- }
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- adev->pm.dpm.uvd_ps = rps;
-
-}
-
-union power_info {
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
- struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
- struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
-};
-
-union pplib_power_state {
- struct _ATOM_PPLIB_STATE v1;
- struct _ATOM_PPLIB_STATE_V2 v2;
-};
-
-static int cz_parse_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- union pplib_power_state *power_state;
- int i, j, k, non_clock_array_index, clock_array_index;
- union pplib_clock_info *clock_info;
- struct _StateArray *state_array;
- struct _ClockInfoArray *clock_info_array;
- struct _NonClockInfoArray *non_clock_info_array;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- u8 *power_state_offset;
- struct cz_ps *ps;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- state_array = (struct _StateArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usStateArrayOffset));
- clock_info_array = (struct _ClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
- non_clock_info_array = (struct _NonClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
-
- adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
- state_array->ucNumEntries, GFP_KERNEL);
-
- if (!adev->pm.dpm.ps)
- return -ENOMEM;
-
- power_state_offset = (u8 *)state_array->states;
- adev->pm.dpm.platform_caps =
- le32_to_cpu(power_info->pplib.ulPlatformCaps);
- adev->pm.dpm.backbias_response_time =
- le16_to_cpu(power_info->pplib.usBackbiasTime);
- adev->pm.dpm.voltage_response_time =
- le16_to_cpu(power_info->pplib.usVoltageTime);
-
- for (i = 0; i < state_array->ucNumEntries; i++) {
- power_state = (union pplib_power_state *)power_state_offset;
- non_clock_array_index = power_state->v2.nonClockInfoIndex;
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- &non_clock_info_array->nonClockInfo[non_clock_array_index];
-
- ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
- if (ps == NULL) {
- for (j = 0; j < i; j++)
- kfree(adev->pm.dpm.ps[j].ps_priv);
- kfree(adev->pm.dpm.ps);
- return -ENOMEM;
- }
-
- adev->pm.dpm.ps[i].ps_priv = ps;
- k = 0;
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
- break;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index *
- clock_info_array->ucEntrySize];
- cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
- k, clock_info);
- k++;
- }
- cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
- non_clock_info,
- non_clock_info_array->ucEntrySize);
- power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
- }
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
-
- return 0;
-}
-
-static int cz_process_firmware_header(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header,
- DpmTable),
- &tmp, pi->sram_end);
-
- if (ret == 0)
- pi->dpm_table_start = tmp;
-
- return ret;
-}
-
-static int cz_dpm_init(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi;
- int ret, i;
-
- pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
- if (NULL == pi)
- return -ENOMEM;
-
- adev->pm.dpm.priv = pi;
-
- ret = amdgpu_get_platform_caps(adev);
- if (ret)
- goto err;
-
- ret = amdgpu_parse_extended_power_table(adev);
- if (ret)
- goto err;
-
- pi->sram_end = SMC_RAM_END;
-
- /* set up DPM defaults */
- for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
- pi->active_target[i] = CZ_AT_DFLT;
-
- pi->mgcg_cgtt_local0 = 0x0;
- pi->mgcg_cgtt_local1 = 0x0;
- pi->clock_slow_down_step = 25000;
- pi->skip_clock_slow_down = 1;
- pi->enable_nb_ps_policy = false;
- pi->caps_power_containment = true;
- pi->caps_cac = true;
- pi->didt_enabled = false;
- if (pi->didt_enabled) {
- pi->caps_sq_ramping = true;
- pi->caps_db_ramping = true;
- pi->caps_td_ramping = true;
- pi->caps_tcp_ramping = true;
- }
- if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
- pi->caps_sclk_ds = true;
- else
- pi->caps_sclk_ds = false;
-
- pi->voting_clients = 0x00c00033;
- pi->auto_thermal_throttling_enabled = true;
- pi->bapm_enabled = false;
- pi->disable_nb_ps3_in_battery = false;
- pi->voltage_drop_threshold = 0;
- pi->caps_sclk_throttle_low_notification = false;
- pi->gfx_pg_threshold = 500;
- pi->caps_fps = true;
- /* uvd */
- pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
- pi->caps_uvd_dpm = true;
- /* vce */
- pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
- pi->caps_vce_dpm = true;
- /* acp */
- pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
- pi->caps_acp_dpm = true;
-
- pi->caps_stable_power_state = false;
- pi->nb_dpm_enabled_by_driver = true;
- pi->nb_dpm_enabled = false;
- pi->caps_voltage_island = false;
- /* flags which indicate need to upload pptable */
- pi->need_pptable_upload = true;
-
- ret = cz_parse_sys_info_table(adev);
- if (ret)
- goto err;
-
- cz_patch_voltage_values(adev);
- cz_construct_boot_state(adev);
-
- ret = cz_parse_power_table(adev);
- if (ret)
- goto err;
-
- ret = cz_process_firmware_header(adev);
- if (ret)
- goto err;
-
- pi->dpm_enabled = true;
- pi->uvd_dynamic_pg = false;
-
- return 0;
-err:
- cz_dpm_fini(adev);
- return ret;
-}
-
-static void cz_dpm_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- kfree(adev->pm.dpm.ps[i].ps_priv);
-
- kfree(adev->pm.dpm.ps);
- kfree(adev->pm.dpm.priv);
- amdgpu_free_extended_power_table(adev);
-}
-
-#define ixSMUSVI_NB_CURRENTVID 0xD8230044
-#define CURRENT_NB_VID_MASK 0xff000000
-#define CURRENT_NB_VID__SHIFT 24
-#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
-#define CURRENT_GFX_VID_MASK 0xff000000
-#define CURRENT_GFX_VID__SHIFT 24
-
-static void
-cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
- struct seq_file *m)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
- u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
- TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
- u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
- TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
- u32 sclk, vclk, dclk, ecclk, tmp;
- u16 vddnb, vddgfx;
-
- if (sclk_index >= NUM_SCLK_LEVELS) {
- seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
- } else {
- sclk = table->entries[sclk_index].clk;
- seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
- }
-
- tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
- CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
- tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
- CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
- seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
-
- seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
- if (!pi->uvd_power_gated) {
- if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
- seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
- } else {
- vclk = uvd_table->entries[uvd_index].vclk;
- dclk = uvd_table->entries[uvd_index].dclk;
- seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
- }
- }
-
- seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
- if (!pi->vce_power_gated) {
- if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
- seq_printf(m, "invalid vce dpm level %d\n", vce_index);
- } else {
- ecclk = vce_table->entries[vce_index].ecclk;
- seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
- }
- }
-}
-
-static void cz_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- int i;
- struct cz_ps *ps = cz_get_ps(rps);
-
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
-
- DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- for (i = 0; i < ps->num_levels; i++) {
- struct cz_pl *pl = &ps->levels[i];
-
- DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n",
- i, pl->sclk,
- cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
- }
-
- amdgpu_dpm_print_ps_status(adev, rps);
-}
-
-static void cz_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int cz_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cz_dpm_set_funcs(adev);
-
- return 0;
-}
-
-
-static int cz_dpm_late_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (amdgpu_dpm) {
- int ret;
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
- /* powerdown unused blocks for now */
- cz_dpm_powergate_uvd(adev, true);
- cz_dpm_powergate_vce(adev, true);
- }
-
- return 0;
-}
-
-static int cz_dpm_sw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret = 0;
- /* fix me to add thermal support TODO */
-
- /* default to balanced state */
- adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
- adev->pm.default_sclk = adev->clock.default_sclk;
- adev->pm.default_mclk = adev->clock.default_mclk;
- adev->pm.current_sclk = adev->clock.default_sclk;
- adev->pm.current_mclk = adev->clock.default_mclk;
- adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
-
- if (amdgpu_dpm == 0)
- return 0;
-
- mutex_lock(&adev->pm.mutex);
- ret = cz_dpm_init(adev);
- if (ret)
- goto dpm_init_failed;
-
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- if (amdgpu_dpm == 1)
- amdgpu_pm_print_power_states(adev);
-
- mutex_unlock(&adev->pm.mutex);
- DRM_INFO("amdgpu: dpm initialized\n");
-
- return 0;
-
-dpm_init_failed:
- cz_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
-
- return ret;
-}
-
-static int cz_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
- cz_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
-
- return 0;
-}
-
-static void cz_reset_ap_mask(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- pi->active_process_mask = 0;
-}
-
-static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
- void **table)
-{
- return cz_smu_download_pptable(adev, table);
-}
-
-static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct SMU8_Fusion_ClkTable *clock_table;
- struct atom_clock_dividers dividers;
- void *table = NULL;
- uint8_t i = 0;
- int ret = 0;
-
- struct amdgpu_clock_voltage_dependency_table *vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
- &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
- struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table *acp_table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- if (!pi->need_pptable_upload)
- return 0;
-
- ret = cz_dpm_download_pptable_from_smu(adev, &table);
- if (ret) {
- DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
- return -EINVAL;
- }
-
- clock_table = (struct SMU8_Fusion_ClkTable *)table;
- /* patch clock table */
- if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
- vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
- uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
- vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
- acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
- DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
- return -EINVAL;
- }
-
- for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
-
- /* vddc sclk */
- clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
- (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
- clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
- (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
- false, &dividers);
- if (ret)
- return ret;
- clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
- (uint8_t)dividers.post_divider;
-
- /* vddgfx sclk */
- clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
- (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;
-
- /* acp breakdown */
- clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
- (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
- clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
- (i < acp_table->count) ? acp_table->entries[i].clk : 0;
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
- false, &dividers);
- if (ret)
- return ret;
- clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
- (uint8_t)dividers.post_divider;
-
- /* uvd breakdown */
- clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
- (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
- clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
- (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
- false, &dividers);
- if (ret)
- return ret;
- clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
- (uint8_t)dividers.post_divider;
-
- clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
- (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
- clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
- (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
- false, &dividers);
- if (ret)
- return ret;
- clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
- (uint8_t)dividers.post_divider;
-
- /* vce breakdown */
- clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
- (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
- clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
- (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
- false, &dividers);
- if (ret)
- return ret;
- clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
- (uint8_t)dividers.post_divider;
- }
-
- /* its time to upload to SMU */
- ret = cz_smu_upload_pptable(adev);
- if (ret) {
- DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
- return ret;
- }
-
- return 0;
-}
-
-static void cz_init_sclk_limit(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- uint32_t clock = 0, level;
-
- if (!table || !table->count) {
- DRM_ERROR("Invalid Voltage Dependency table.\n");
- return;
- }
-
- pi->sclk_dpm.soft_min_clk = 0;
- pi->sclk_dpm.hard_min_clk = 0;
- cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
- level = cz_get_argument(adev);
- if (level < table->count) {
- clock = table->entries[level].clk;
- } else {
- DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
- clock = table->entries[table->count - 1].clk;
- }
-
- pi->sclk_dpm.soft_max_clk = clock;
- pi->sclk_dpm.hard_max_clk = clock;
-
-}
-
-static void cz_init_uvd_limit(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_uvd_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- uint32_t clock = 0, level;
-
- if (!table || !table->count) {
- DRM_ERROR("Invalid Voltage Dependency table.\n");
- return;
- }
-
- pi->uvd_dpm.soft_min_clk = 0;
- pi->uvd_dpm.hard_min_clk = 0;
- cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
- level = cz_get_argument(adev);
- if (level < table->count) {
- clock = table->entries[level].vclk;
- } else {
- DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
- clock = table->entries[table->count - 1].vclk;
- }
-
- pi->uvd_dpm.soft_max_clk = clock;
- pi->uvd_dpm.hard_max_clk = clock;
-
-}
-
-static void cz_init_vce_limit(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- uint32_t clock = 0, level;
-
- if (!table || !table->count) {
- DRM_ERROR("Invalid Voltage Dependency table.\n");
- return;
- }
-
- pi->vce_dpm.soft_min_clk = table->entries[0].ecclk;
- pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
- cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
- level = cz_get_argument(adev);
- if (level < table->count) {
- clock = table->entries[level].ecclk;
- } else {
- /* future BIOS would fix this error */
- DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
- clock = table->entries[table->count - 1].ecclk;
- }
-
- pi->vce_dpm.soft_max_clk = clock;
- pi->vce_dpm.hard_max_clk = clock;
-
-}
-
-static void cz_init_acp_limit(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
- uint32_t clock = 0, level;
-
- if (!table || !table->count) {
- DRM_ERROR("Invalid Voltage Dependency table.\n");
- return;
- }
-
- pi->acp_dpm.soft_min_clk = 0;
- pi->acp_dpm.hard_min_clk = 0;
- cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
- level = cz_get_argument(adev);
- if (level < table->count) {
- clock = table->entries[level].clk;
- } else {
- DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
- clock = table->entries[table->count - 1].clk;
- }
-
- pi->acp_dpm.soft_max_clk = clock;
- pi->acp_dpm.hard_max_clk = clock;
-
-}
-
-static void cz_init_pg_state(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- pi->uvd_power_gated = false;
- pi->vce_power_gated = false;
- pi->acp_power_gated = false;
-
-}
-
-static void cz_init_sclk_threshold(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- pi->low_sclk_interrupt_threshold = 0;
-}
-
-static void cz_dpm_setup_asic(struct amdgpu_device *adev)
-{
- cz_reset_ap_mask(adev);
- cz_dpm_upload_pptable_to_smu(adev);
- cz_init_sclk_limit(adev);
- cz_init_uvd_limit(adev);
- cz_init_vce_limit(adev);
- cz_init_acp_limit(adev);
- cz_init_pg_state(adev);
- cz_init_sclk_threshold(adev);
-
-}
-
-static bool cz_check_smu_feature(struct amdgpu_device *adev,
- uint32_t feature)
-{
- uint32_t smu_feature = 0;
- int ret;
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_GetFeatureStatus, 0);
- if (ret) {
- DRM_ERROR("Failed to get SMU features from SMC.\n");
- return false;
- } else {
- smu_feature = cz_get_argument(adev);
- if (feature & smu_feature)
- return true;
- }
-
- return false;
-}
-
-static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
-{
- if (cz_check_smu_feature(adev,
- SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return true;
-
- return false;
-}
-
-static void cz_program_voting_clients(struct amdgpu_device *adev)
-{
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
-}
-
-static void cz_clear_voting_clients(struct amdgpu_device *adev)
-{
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
-}
-
-static int cz_start_dpm(struct amdgpu_device *adev)
-{
- int ret = 0;
-
- if (amdgpu_dpm) {
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
- if (ret) {
- DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int cz_stop_dpm(struct amdgpu_device *adev)
-{
- int ret = 0;
-
- if (amdgpu_dpm && adev->pm.dpm_enabled) {
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
- if (ret) {
- DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
- uint32_t clock, uint16_t msg)
-{
- int i = 0;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- switch (msg) {
- case PPSMC_MSG_SetSclkSoftMin:
- case PPSMC_MSG_SetSclkHardMin:
- for (i = 0; i < table->count; i++)
- if (clock <= table->entries[i].clk)
- break;
- if (i == table->count)
- i = table->count - 1;
- break;
- case PPSMC_MSG_SetSclkSoftMax:
- case PPSMC_MSG_SetSclkHardMax:
- for (i = table->count - 1; i >= 0; i--)
- if (clock >= table->entries[i].clk)
- break;
- if (i < 0)
- i = 0;
- break;
- default:
- break;
- }
-
- return i;
-}
-
-static uint32_t cz_get_eclk_level(struct amdgpu_device *adev,
- uint32_t clock, uint16_t msg)
-{
- int i = 0;
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
-
- if (table->count == 0)
- return 0;
-
- switch (msg) {
- case PPSMC_MSG_SetEclkSoftMin:
- case PPSMC_MSG_SetEclkHardMin:
- for (i = 0; i < table->count-1; i++)
- if (clock <= table->entries[i].ecclk)
- break;
- break;
- case PPSMC_MSG_SetEclkSoftMax:
- case PPSMC_MSG_SetEclkHardMax:
- for (i = table->count - 1; i > 0; i--)
- if (clock >= table->entries[i].ecclk)
- break;
- break;
- default:
- break;
- }
-
- return i;
-}
-
-static uint32_t cz_get_uvd_level(struct amdgpu_device *adev,
- uint32_t clock, uint16_t msg)
-{
- int i = 0;
- struct amdgpu_uvd_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
-
- switch (msg) {
- case PPSMC_MSG_SetUvdSoftMin:
- case PPSMC_MSG_SetUvdHardMin:
- for (i = 0; i < table->count; i++)
- if (clock <= table->entries[i].vclk)
- break;
- if (i == table->count)
- i = table->count - 1;
- break;
- case PPSMC_MSG_SetUvdSoftMax:
- case PPSMC_MSG_SetUvdHardMax:
- for (i = table->count - 1; i >= 0; i--)
- if (clock >= table->entries[i].vclk)
- break;
- if (i < 0)
- i = 0;
- break;
- default:
- break;
- }
-
- return i;
-}
-
-static int cz_program_bootup_state(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- uint32_t soft_min_clk = 0;
- uint32_t soft_max_clk = 0;
- int ret = 0;
-
- pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
- pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;
-
- soft_min_clk = cz_get_sclk_level(adev,
- pi->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin);
- soft_max_clk = cz_get_sclk_level(adev,
- pi->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax);
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
- if (ret)
- return -EINVAL;
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
- if (ret)
- return -EINVAL;
-
- return 0;
-}
-
-/* TODO */
-static int cz_disable_cgpg(struct amdgpu_device *adev)
-{
- return 0;
-}
-
-/* TODO */
-static int cz_enable_cgpg(struct amdgpu_device *adev)
-{
- return 0;
-}
-
-/* TODO */
-static int cz_program_pt_config_registers(struct amdgpu_device *adev)
-{
- return 0;
-}
-
-static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- uint32_t reg = 0;
-
- if (pi->caps_sq_ramping) {
- reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
- if (enable)
- reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
- else
- reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
- WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);
- }
- if (pi->caps_db_ramping) {
- reg = RREG32_DIDT(ixDIDT_DB_CTRL0);
- if (enable)
- reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1);
- else
- reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0);
- WREG32_DIDT(ixDIDT_DB_CTRL0, reg);
- }
- if (pi->caps_td_ramping) {
- reg = RREG32_DIDT(ixDIDT_TD_CTRL0);
- if (enable)
- reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1);
- else
- reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0);
- WREG32_DIDT(ixDIDT_TD_CTRL0, reg);
- }
- if (pi->caps_tcp_ramping) {
- reg = RREG32_DIDT(ixDIDT_TCP_CTRL0);
- if (enable)
- reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
- else
- reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
- WREG32_DIDT(ixDIDT_TCP_CTRL0, reg);
- }
-
-}
-
-static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret;
-
- if (pi->caps_sq_ramping || pi->caps_db_ramping ||
- pi->caps_td_ramping || pi->caps_tcp_ramping) {
- if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
- ret = cz_disable_cgpg(adev);
- if (ret) {
- DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
- return -EINVAL;
- }
- adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE;
- }
-
- ret = cz_program_pt_config_registers(adev);
- if (ret) {
- DRM_ERROR("Di/Dt config failed\n");
- return -EINVAL;
- }
- cz_do_enable_didt(adev, enable);
-
- if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) {
- ret = cz_enable_cgpg(adev);
- if (ret) {
- DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
- return -EINVAL;
- }
- adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
- }
- }
-
- return 0;
-}
-
-/* TODO */
-static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
-{
-}
-
-static void cz_update_current_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct cz_ps *ps = cz_get_ps(rps);
-
- pi->current_ps = *ps;
- pi->current_rps = *rps;
- pi->current_rps.ps_priv = &pi->current_ps;
- adev->pm.dpm.current_ps = &pi->current_rps;
-}
-
-static void cz_update_requested_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct cz_ps *ps = cz_get_ps(rps);
-
- pi->requested_ps = *ps;
- pi->requested_rps = *rps;
- pi->requested_rps.ps_priv = &pi->requested_ps;
- adev->pm.dpm.requested_ps = &pi->requested_rps;
-}
-
-/* PP arbiter support needed TODO */
-static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps,
- struct amdgpu_ps *old_rps)
-{
- struct cz_ps *ps = cz_get_ps(new_rps);
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_clock_and_voltage_limits *limits =
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- /* 10kHz memory clock */
- uint32_t mclk = 0;
-
- ps->force_high = false;
- ps->need_dfs_bypass = true;
- pi->video_start = new_rps->dclk || new_rps->vclk ||
- new_rps->evclk || new_rps->ecclk;
-
- if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
- ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
- pi->battery_state = true;
- else
- pi->battery_state = false;
-
- if (pi->caps_stable_power_state)
- mclk = limits->mclk;
-
- if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1])
- ps->force_high = true;
-}
-
-static int cz_dpm_enable(struct amdgpu_device *adev)
-{
- const char *chip_name;
- int ret = 0;
-
- /* re-enabling will hang up the SMU, so check whether DPM is already enabled first */
- if (cz_check_for_dpm_enabled(adev))
- return -EINVAL;
-
- cz_program_voting_clients(adev);
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- chip_name = "carrizo";
- break;
- case CHIP_STONEY:
- chip_name = "stoney";
- break;
- default:
- BUG();
- }
-
- ret = cz_start_dpm(adev);
- if (ret) {
- DRM_ERROR("%s DPM enable failed\n", chip_name);
- return -EINVAL;
- }
-
- ret = cz_program_bootup_state(adev);
- if (ret) {
- DRM_ERROR("%s bootup state program failed\n", chip_name);
- return -EINVAL;
- }
-
- ret = cz_enable_didt(adev, true);
- if (ret) {
- DRM_ERROR("%s enable di/dt failed\n", chip_name);
- return -EINVAL;
- }
-
- cz_reset_acp_boot_level(adev);
- cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
- return 0;
-}
-
-static int cz_dpm_hw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret = 0;
-
- mutex_lock(&adev->pm.mutex);
-
- /* smu init only needs to be called at startup, not resume.
- * It should be in sw_init, but requires the fw info gathered
- * in sw_init from other IP modules.
- */
- ret = cz_smu_init(adev);
- if (ret) {
- DRM_ERROR("amdgpu: smc initialization failed\n");
- mutex_unlock(&adev->pm.mutex);
- return ret;
- }
-
- /* do the actual fw loading */
- ret = cz_smu_start(adev);
- if (ret) {
- DRM_ERROR("amdgpu: smc start failed\n");
- mutex_unlock(&adev->pm.mutex);
- return ret;
- }
-
- if (!amdgpu_dpm) {
- adev->pm.dpm_enabled = false;
- mutex_unlock(&adev->pm.mutex);
- return ret;
- }
-
- /* cz dpm setup asic */
- cz_dpm_setup_asic(adev);
-
- /* cz dpm enable */
- ret = cz_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
-
- mutex_unlock(&adev->pm.mutex);
-
- return 0;
-}
-
-static int cz_dpm_disable(struct amdgpu_device *adev)
-{
- int ret = 0;
-
- if (!cz_check_for_dpm_enabled(adev))
- return -EINVAL;
-
- ret = cz_enable_didt(adev, false);
- if (ret) {
- DRM_ERROR("disable di/dt failed\n");
- return -EINVAL;
- }
-
- /* powerup blocks */
- cz_dpm_powergate_uvd(adev, false);
- cz_dpm_powergate_vce(adev, false);
-
- cz_clear_voting_clients(adev);
- cz_stop_dpm(adev);
- cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
- return 0;
-}
-
-static int cz_dpm_hw_fini(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- /* smu fini only needs to be called at teardown, not suspend.
- * It should be in sw_fini, but we put it here for symmetry
- * with smu init.
- */
- cz_smu_fini(adev);
-
- if (adev->pm.dpm_enabled) {
- ret = cz_dpm_disable(adev);
-
- adev->pm.dpm.current_ps =
- adev->pm.dpm.requested_ps =
- adev->pm.dpm.boot_ps;
- }
-
- adev->pm.dpm_enabled = false;
-
- mutex_unlock(&adev->pm.mutex);
-
- return ret;
-}
-
-static int cz_dpm_suspend(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
-
- ret = cz_dpm_disable(adev);
-
- adev->pm.dpm.current_ps =
- adev->pm.dpm.requested_ps =
- adev->pm.dpm.boot_ps;
-
- mutex_unlock(&adev->pm.mutex);
- }
-
- return ret;
-}
-
-static int cz_dpm_resume(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- /* do the actual fw loading */
- ret = cz_smu_start(adev);
- if (ret) {
- DRM_ERROR("amdgpu: smc start failed\n");
- mutex_unlock(&adev->pm.mutex);
- return ret;
- }
-
- if (!amdgpu_dpm) {
- adev->pm.dpm_enabled = false;
- mutex_unlock(&adev->pm.mutex);
- return ret;
- }
-
- /* cz dpm setup asic */
- cz_dpm_setup_asic(adev);
-
- /* cz dpm enable */
- ret = cz_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
-
- mutex_unlock(&adev->pm.mutex);
- /* upon resume, re-compute the clocks */
- if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
-
- return 0;
-}
-
-static int cz_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int cz_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static int cz_dpm_get_temperature(struct amdgpu_device *adev)
-{
- int actual_temp = 0;
- uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
- uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
-
- if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
- actual_temp = 1000 * ((temp / 8) - 49);
- else
- actual_temp = 1000 * (temp / 8);
-
- return actual_temp;
-}
-
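The decode above treats the CUR_TEMP counter as 1/8 °C steps, with CUR_TEMP_RANGE_SEL selecting a range that is offset down by 49 °C, and reports the result in millidegrees. The same arithmetic in isolation (a sketch; the register read is replaced by plain parameters):

#include <stdint.h>
#include <stdio.h>

static int decode_temp_millic(uint32_t temp, int range_sel)
{
	/* counter is in 1/8 degree steps; range 1 is shifted down 49 C */
	if (range_sel)
		return 1000 * ((int)(temp / 8) - 49);
	return 1000 * (int)(temp / 8);
}

int main(void)
{
	printf("%d\n", decode_temp_millic(8 * 85, 0));	/* 85000 */
	printf("%d\n", decode_temp_millic(8 * 85, 1));	/* 36000 */
	return 0;
}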
-static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
- struct amdgpu_ps *new_ps = &requested_ps;
-
- cz_update_requested_ps(adev, new_ps);
- cz_apply_state_adjust_rules(adev, &pi->requested_rps,
- &pi->current_rps);
-
- return 0;
-}
-
-static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_clock_and_voltage_limits *limits =
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- uint32_t clock, stable_ps_clock = 0;
-
- clock = pi->sclk_dpm.soft_min_clk;
-
- if (pi->caps_stable_power_state) {
- stable_ps_clock = limits->sclk * 75 / 100;
- if (clock < stable_ps_clock)
- clock = stable_ps_clock;
- }
-
- if (clock != pi->sclk_dpm.soft_min_clk) {
- pi->sclk_dpm.soft_min_clk = clock;
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(adev, clock,
- PPSMC_MSG_SetSclkSoftMin));
- }
-
- if (pi->caps_stable_power_state &&
- pi->sclk_dpm.soft_max_clk != clock) {
- pi->sclk_dpm.soft_max_clk = clock;
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(adev, clock,
- PPSMC_MSG_SetSclkSoftMax));
- } else {
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(adev,
- pi->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
- }
-
- return 0;
-}
-
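When the stable-power-state cap is set, the function above refuses to let the sclk soft minimum drop below 75% of the AC limit (and pins the soft maximum to the same value). The clamp by itself, as a userspace sketch with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

/* never report a soft minimum below 75% of the AC sclk limit */
static uint32_t clamp_stable_sclk(uint32_t soft_min, uint32_t limit_sclk)
{
	uint32_t stable = limit_sclk * 75 / 100;

	return soft_min < stable ? stable : soft_min;
}

int main(void)
{
	printf("%u\n", clamp_stable_sclk(30000, 80000));	/* 60000 */
	printf("%u\n", clamp_stable_sclk(70000, 80000));	/* 70000 */
	return 0;
}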
-static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- if (pi->caps_sclk_ds) {
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetMinDeepSleepSclk,
- CZ_MIN_DEEP_SLEEP_SCLK);
- }
-
- return 0;
-}
-
-/* FIXME: without DAL support, is this still needed in the set-power-state list? */
-static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetWatermarkFrequency,
- pi->sclk_dpm.soft_max_clk);
-
- return 0;
-}
-
-static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
-{
- int ret = 0;
- struct cz_power_info *pi = cz_get_pi(adev);
-
- /* also depend on dal NBPStateDisableRequired */
- if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) {
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_EnableAllSmuFeatures,
- NB_DPM_MASK);
- if (ret) {
- DRM_ERROR("amdgpu: nb dpm enable failed\n");
- return ret;
- }
- pi->nb_dpm_enabled = true;
- }
-
- return ret;
-}
-
-static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
- bool enable)
-{
- if (enable)
- cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate);
- else
- cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate);
-}
-
-static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct cz_ps *ps = &pi->requested_ps;
-
- if (pi->sys_info.nb_dpm_enable) {
- if (ps->force_high)
- cz_dpm_nbdpm_lm_pstate_enable(adev, false);
- else
- cz_dpm_nbdpm_lm_pstate_enable(adev, true);
- }
-
- return 0;
-}
-
-/* with dpm enabled */
-static int cz_dpm_set_power_state(struct amdgpu_device *adev)
-{
- cz_dpm_update_sclk_limit(adev);
- cz_dpm_set_deep_sleep_sclk_threshold(adev);
- cz_dpm_set_watermark_threshold(adev);
- cz_dpm_enable_nbdpm(adev);
- cz_dpm_update_low_memory_pstate(adev);
-
- return 0;
-}
-
-static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_ps *ps = &pi->requested_rps;
-
- cz_update_current_ps(adev, ps);
-}
-
-static int cz_dpm_force_highest(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret = 0;
-
- if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) {
- pi->sclk_dpm.soft_min_clk =
- pi->sclk_dpm.soft_max_clk;
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(adev,
- pi->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int cz_dpm_force_lowest(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret = 0;
-
- if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) {
- pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk;
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(adev,
- pi->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- if (!pi->max_sclk_level) {
- cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
- pi->max_sclk_level = cz_get_argument(adev) + 1;
- }
-
- if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) {
- DRM_ERROR("Invalid max sclk level!\n");
- return -EINVAL;
- }
-
- return pi->max_sclk_level;
-}
-
-static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *dep_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- uint32_t level = 0;
- int ret = 0;
-
- pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk;
- level = cz_dpm_get_max_sclk_level(adev) - 1;
- if (level < dep_table->count)
- pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk;
- else
- pi->sclk_dpm.soft_max_clk =
- dep_table->entries[dep_table->count - 1].clk;
-
- /* notify the SMU of the new min/max sclk soft values */
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(adev,
- pi->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
- if (ret)
- return ret;
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(adev,
- pi->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
- if (ret)
- return ret;
-
- DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
- pi->sclk_dpm.soft_min_clk,
- pi->sclk_dpm.soft_max_clk);
-
- return 0;
-}
-
-static int cz_dpm_uvd_force_highest(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret = 0;
-
- if (pi->uvd_dpm.soft_min_clk != pi->uvd_dpm.soft_max_clk) {
- pi->uvd_dpm.soft_min_clk =
- pi->uvd_dpm.soft_max_clk;
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetUvdSoftMin,
- cz_get_uvd_level(adev,
- pi->uvd_dpm.soft_min_clk,
- PPSMC_MSG_SetUvdSoftMin));
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int cz_dpm_uvd_force_lowest(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret = 0;
-
- if (pi->uvd_dpm.soft_max_clk != pi->uvd_dpm.soft_min_clk) {
- pi->uvd_dpm.soft_max_clk = pi->uvd_dpm.soft_min_clk;
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetUvdSoftMax,
- cz_get_uvd_level(adev,
- pi->uvd_dpm.soft_max_clk,
- PPSMC_MSG_SetUvdSoftMax));
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static uint32_t cz_dpm_get_max_uvd_level(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- if (!pi->max_uvd_level) {
- cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
- pi->max_uvd_level = cz_get_argument(adev) + 1;
- }
-
- if (pi->max_uvd_level > CZ_MAX_HARDWARE_POWERLEVELS) {
- DRM_ERROR("Invalid max uvd level!\n");
- return -EINVAL;
- }
-
- return pi->max_uvd_level;
-}
-
-static int cz_dpm_unforce_uvd_dpm_levels(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_uvd_clock_voltage_dependency_table *dep_table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- uint32_t level = 0;
- int ret = 0;
-
- pi->uvd_dpm.soft_min_clk = dep_table->entries[0].vclk;
- level = cz_dpm_get_max_uvd_level(adev) - 1;
- if (level < dep_table->count)
- pi->uvd_dpm.soft_max_clk = dep_table->entries[level].vclk;
- else
- pi->uvd_dpm.soft_max_clk =
- dep_table->entries[dep_table->count - 1].vclk;
-
- /* notify the SMU of the new min/max uvd (vclk) soft values */
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetUvdSoftMin,
- cz_get_uvd_level(adev,
- pi->uvd_dpm.soft_min_clk,
- PPSMC_MSG_SetUvdSoftMin));
- if (ret)
- return ret;
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetUvdSoftMax,
- cz_get_uvd_level(adev,
- pi->uvd_dpm.soft_max_clk,
- PPSMC_MSG_SetUvdSoftMax));
- if (ret)
- return ret;
-
- DRM_DEBUG("DPM uvd unforce state min=%d, max=%d.\n",
- pi->uvd_dpm.soft_min_clk,
- pi->uvd_dpm.soft_max_clk);
-
- return 0;
-}
-
-static int cz_dpm_vce_force_highest(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret = 0;
-
- if (pi->vce_dpm.soft_min_clk != pi->vce_dpm.soft_max_clk) {
- pi->vce_dpm.soft_min_clk =
- pi->vce_dpm.soft_max_clk;
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetEclkSoftMin,
- cz_get_eclk_level(adev,
- pi->vce_dpm.soft_min_clk,
- PPSMC_MSG_SetEclkSoftMin));
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int cz_dpm_vce_force_lowest(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret = 0;
-
- if (pi->vce_dpm.soft_max_clk != pi->vce_dpm.soft_min_clk) {
- pi->vce_dpm.soft_max_clk = pi->vce_dpm.soft_min_clk;
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetEclkSoftMax,
- cz_get_eclk_level(adev,
- pi->vce_dpm.soft_max_clk,
- PPSMC_MSG_SetEclkSoftMax));
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static uint32_t cz_dpm_get_max_vce_level(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- if (!pi->max_vce_level) {
- cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
- pi->max_vce_level = cz_get_argument(adev) + 1;
- }
-
- if (pi->max_vce_level > CZ_MAX_HARDWARE_POWERLEVELS) {
- DRM_ERROR("Invalid max vce level!\n");
- return -EINVAL;
- }
-
- return pi->max_vce_level;
-}
-
-static int cz_dpm_unforce_vce_dpm_levels(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_vce_clock_voltage_dependency_table *dep_table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- uint32_t level = 0;
- int ret = 0;
-
- pi->vce_dpm.soft_min_clk = dep_table->entries[0].ecclk;
- level = cz_dpm_get_max_vce_level(adev) - 1;
- if (level < dep_table->count)
- pi->vce_dpm.soft_max_clk = dep_table->entries[level].ecclk;
- else
- pi->vce_dpm.soft_max_clk =
- dep_table->entries[dep_table->count - 1].ecclk;
-
- /* notify the SMU of the new min/max vce (eclk) soft values */
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetEclkSoftMin,
- cz_get_eclk_level(adev,
- pi->vce_dpm.soft_min_clk,
- PPSMC_MSG_SetEclkSoftMin));
- if (ret)
- return ret;
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetEclkSoftMax,
- cz_get_eclk_level(adev,
- pi->vce_dpm.soft_max_clk,
- PPSMC_MSG_SetEclkSoftMax));
- if (ret)
- return ret;
-
- DRM_DEBUG("DPM vce unforce state min=%d, max=%d.\n",
- pi->vce_dpm.soft_min_clk,
- pi->vce_dpm.soft_max_clk);
-
- return 0;
-}
-
-static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
- enum amdgpu_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMDGPU_DPM_FORCED_LEVEL_HIGH:
- /* sclk */
- ret = cz_dpm_unforce_dpm_levels(adev);
- if (ret)
- return ret;
- ret = cz_dpm_force_highest(adev);
- if (ret)
- return ret;
-
- /* uvd */
- ret = cz_dpm_unforce_uvd_dpm_levels(adev);
- if (ret)
- return ret;
- ret = cz_dpm_uvd_force_highest(adev);
- if (ret)
- return ret;
-
- /* vce */
- ret = cz_dpm_unforce_vce_dpm_levels(adev);
- if (ret)
- return ret;
- ret = cz_dpm_vce_force_highest(adev);
- if (ret)
- return ret;
- break;
- case AMDGPU_DPM_FORCED_LEVEL_LOW:
- /* sclk */
- ret = cz_dpm_unforce_dpm_levels(adev);
- if (ret)
- return ret;
- ret = cz_dpm_force_lowest(adev);
- if (ret)
- return ret;
-
- /* uvd */
- ret = cz_dpm_unforce_uvd_dpm_levels(adev);
- if (ret)
- return ret;
- ret = cz_dpm_uvd_force_lowest(adev);
- if (ret)
- return ret;
-
- /* vce */
- ret = cz_dpm_unforce_vce_dpm_levels(adev);
- if (ret)
- return ret;
- ret = cz_dpm_vce_force_lowest(adev);
- if (ret)
- return ret;
- break;
- case AMDGPU_DPM_FORCED_LEVEL_AUTO:
- /* sclk */
- ret = cz_dpm_unforce_dpm_levels(adev);
- if (ret)
- return ret;
-
- /* uvd */
- ret = cz_dpm_unforce_uvd_dpm_levels(adev);
- if (ret)
- return ret;
-
- /* vce */
- ret = cz_dpm_unforce_vce_dpm_levels(adev);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- adev->pm.dpm.forced_level = level;
-
- return ret;
-}
-
-/* FIXME: handle display configuration change lists here;
- * mostly DAL related */
-static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
-{
-}
-
-static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps);
-
- if (low)
- return requested_state->levels[0].sclk;
- else
- return requested_state->levels[requested_state->num_levels - 1].sclk;
-}
-
-static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- return pi->sys_info.bootup_uma_clk;
-}
-
-static int cz_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret = 0;
-
- if (enable && pi->caps_uvd_dpm) {
- pi->dpm_flags |= DPMFlags_UVD_Enabled;
- DRM_DEBUG("UVD DPM Enabled.\n");
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_EnableAllSmuFeatures, UVD_DPM_MASK);
- } else {
- pi->dpm_flags &= ~DPMFlags_UVD_Enabled;
- DRM_DEBUG("UVD DPM Stopped\n");
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_DisableAllSmuFeatures, UVD_DPM_MASK);
- }
-
- return ret;
-}
-
-static int cz_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
-{
- return cz_enable_uvd_dpm(adev, !gate);
-}
-
-static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret;
-
- if (pi->uvd_power_gated == gate)
- return;
-
- pi->uvd_power_gated = gate;
-
- if (gate) {
- if (pi->caps_uvd_pg) {
- ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
- if (ret) {
- DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
- return;
- }
-
- /* shutdown the UVD block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
-
- if (ret) {
- DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n");
- return;
- }
- }
- cz_update_uvd_dpm(adev, gate);
- if (pi->caps_uvd_pg) {
- /* power off the UVD block */
- ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
- if (ret) {
- DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n");
- return;
- }
- }
- } else {
- if (pi->caps_uvd_pg) {
- /* power on the UVD block */
- if (pi->uvd_dynamic_pg)
- ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
- else
- ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
-
- if (ret) {
- DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n");
- return;
- }
-
- /* re-init the UVD block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
-
- if (ret) {
- DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n");
- return;
- }
-
- ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- if (ret) {
- DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
- return;
- }
- }
- cz_update_uvd_dpm(adev, gate);
- }
-}
-
-static int cz_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- int ret = 0;
-
- if (enable && pi->caps_vce_dpm) {
- pi->dpm_flags |= DPMFlags_VCE_Enabled;
- DRM_DEBUG("VCE DPM Enabled.\n");
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_EnableAllSmuFeatures, VCE_DPM_MASK);
-
- } else {
- pi->dpm_flags &= ~DPMFlags_VCE_Enabled;
- DRM_DEBUG("VCE DPM Stopped\n");
-
- ret = cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_DisableAllSmuFeatures, VCE_DPM_MASK);
- }
-
- return ret;
-}
-
-static int cz_update_vce_dpm(struct amdgpu_device *adev)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
-
- /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
- if (pi->caps_stable_power_state) {
- pi->vce_dpm.hard_min_clk = table->entries[table->count - 1].ecclk;
- } else { /* non-stable p-state case, without vce.Arbiter.EcclkHardMin:
- * leave the hard minimum as set by the user */
- /* pi->vce_dpm.hard_min_clk = table->entries[0].ecclk; */
- }
-
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(adev,
- pi->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
- return 0;
-}
-
-static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
-{
- struct cz_power_info *pi = cz_get_pi(adev);
-
- if (pi->caps_vce_pg) {
- if (pi->vce_power_gated != gate) {
- if (gate) {
- /* disable clockgating so we can properly shut down the block */
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- /* shutdown the VCE block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
-
- cz_enable_vce_dpm(adev, false);
- cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
- pi->vce_power_gated = true;
- } else {
- cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
- pi->vce_power_gated = false;
-
- /* re-init the VCE block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
-
- cz_update_vce_dpm(adev);
- cz_enable_vce_dpm(adev, true);
- }
- } else {
- if (!pi->vce_power_gated) {
- cz_update_vce_dpm(adev);
- }
- }
- } else { /* !pi->caps_vce_pg */
- pi->vce_power_gated = gate;
- cz_update_vce_dpm(adev);
- cz_enable_vce_dpm(adev, !gate);
- }
-}
-
-static int cz_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
- bool *equal)
-{
- if (equal == NULL)
- return -EINVAL;
-
- *equal = false;
- return 0;
-}
-
-const struct amd_ip_funcs cz_dpm_ip_funcs = {
- .name = "cz_dpm",
- .early_init = cz_dpm_early_init,
- .late_init = cz_dpm_late_init,
- .sw_init = cz_dpm_sw_init,
- .sw_fini = cz_dpm_sw_fini,
- .hw_init = cz_dpm_hw_init,
- .hw_fini = cz_dpm_hw_fini,
- .suspend = cz_dpm_suspend,
- .resume = cz_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = cz_dpm_set_clockgating_state,
- .set_powergating_state = cz_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
- .get_temperature = cz_dpm_get_temperature,
- .pre_set_power_state = cz_dpm_pre_set_power_state,
- .set_power_state = cz_dpm_set_power_state,
- .post_set_power_state = cz_dpm_post_set_power_state,
- .display_configuration_changed = cz_dpm_display_configuration_changed,
- .get_sclk = cz_dpm_get_sclk,
- .get_mclk = cz_dpm_get_mclk,
- .print_power_state = cz_dpm_print_power_state,
- .debugfs_print_current_performance_level =
- cz_dpm_debugfs_print_current_performance_level,
- .force_performance_level = cz_dpm_force_dpm_level,
- .vblank_too_short = NULL,
- .powergate_uvd = cz_dpm_powergate_uvd,
- .powergate_vce = cz_dpm_powergate_vce,
- .check_state_equal = cz_check_state_equal,
-};
-
-static void cz_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &cz_dpm_funcs;
-}
-
-const struct amdgpu_ip_block_version cz_dpm_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_dpm_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
deleted file mode 100644
index 5df8c1faab51..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __CZ_DPM_H__
-#define __CZ_DPM_H__
-
-#include "smu8_fusion.h"
-
-#define CZ_AT_DFLT 30
-#define CZ_NUM_NBPSTATES 4
-#define CZ_NUM_NBPMEMORY_CLOCK 2
-#define CZ_MAX_HARDWARE_POWERLEVELS 8
-#define CZ_MAX_DISPLAY_CLOCK_LEVEL 8
-#define CZ_MAX_DISPLAYPHY_IDS 10
-
-#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
-
-#define SMC_RAM_END 0x40000
-
-#define DPMFlags_SCLK_Enabled 0x00000001
-#define DPMFlags_UVD_Enabled 0x00000002
-#define DPMFlags_VCE_Enabled 0x00000004
-#define DPMFlags_ACP_Enabled 0x00000008
-#define DPMFlags_ForceHighestValid 0x40000000
-#define DPMFlags_Debug 0x80000000
-
-/* Do not change the following, it is also defined in SMU8.h */
-#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001
-#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
-#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000
-#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000
-
-/* temporary solution for SetMinDeepSleepSclk;
- * should be indicated by the display adapter.
- * Unit: 10 kHz */
-#define CZ_MIN_DEEP_SLEEP_SCLK 800
-
-enum cz_pt_config_reg_type {
- CZ_CONFIGREG_MMR = 0,
- CZ_CONFIGREG_SMC_IND,
- CZ_CONFIGREG_DIDT_IND,
- CZ_CONFIGREG_CACHE,
- CZ_CONFIGREG_MAX
-};
-
-struct cz_pt_config_reg {
- uint32_t offset;
- uint32_t mask;
- uint32_t shift;
- uint32_t value;
- enum cz_pt_config_reg_type type;
-};
-
-struct cz_dpm_entry {
- uint32_t soft_min_clk;
- uint32_t hard_min_clk;
- uint32_t soft_max_clk;
- uint32_t hard_max_clk;
-};
-
-struct cz_pl {
- uint32_t sclk;
- uint8_t vddc_index;
- uint8_t ds_divider_index;
- uint8_t ss_divider_index;
- uint8_t allow_gnb_slow;
- uint8_t force_nbp_state;
- uint8_t display_wm;
- uint8_t vce_wm;
-};
-
-struct cz_ps {
- struct cz_pl levels[CZ_MAX_HARDWARE_POWERLEVELS];
- uint32_t num_levels;
- bool need_dfs_bypass;
- uint8_t dpm0_pg_nb_ps_lo;
- uint8_t dpm0_pg_nb_ps_hi;
- uint8_t dpmx_nb_ps_lo;
- uint8_t dpmx_nb_ps_hi;
- bool force_high;
-};
-
-struct cz_displayphy_entry {
- uint8_t phy_present;
- uint8_t active_lane_mapping;
- uint8_t display_conf_type;
- uint8_t num_active_lanes;
-};
-
-struct cz_displayphy_info {
- bool phy_access_initialized;
- struct cz_displayphy_entry entries[CZ_MAX_DISPLAYPHY_IDS];
-};
-
-struct cz_sys_info {
- uint32_t bootup_uma_clk;
- uint32_t bootup_sclk;
- uint32_t dentist_vco_freq;
- uint32_t nb_dpm_enable;
- uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK];
- uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
- uint8_t nbp_voltage_index[CZ_NUM_NBPSTATES];
- uint32_t display_clock[CZ_MAX_DISPLAY_CLOCK_LEVEL];
- uint16_t bootup_nb_voltage_index;
- uint8_t htc_tmp_lmt;
- uint8_t htc_hyst_lmt;
- uint32_t uma_channel_number;
-};
-
-struct cz_power_info {
- uint32_t active_target[CZ_MAX_HARDWARE_POWERLEVELS];
- struct cz_sys_info sys_info;
- struct cz_pl boot_pl;
- bool disable_nb_ps3_in_battery;
- bool battery_state;
- uint32_t lowest_valid;
- uint32_t highest_valid;
- uint16_t high_voltage_threshold;
- /* smc offsets */
- uint32_t sram_end;
- uint32_t dpm_table_start;
- uint32_t soft_regs_start;
- /* dpm SMU tables */
- uint8_t uvd_level_count;
- uint8_t vce_level_count;
- uint8_t acp_level_count;
- uint32_t fps_high_threshold;
- uint32_t fps_low_threshold;
- /* dpm table */
- uint32_t dpm_flags;
- struct cz_dpm_entry sclk_dpm;
- struct cz_dpm_entry uvd_dpm;
- struct cz_dpm_entry vce_dpm;
- struct cz_dpm_entry acp_dpm;
-
- uint8_t uvd_boot_level;
- uint8_t uvd_interval;
- uint8_t vce_boot_level;
- uint8_t vce_interval;
- uint8_t acp_boot_level;
- uint8_t acp_interval;
-
- uint8_t graphics_boot_level;
- uint8_t graphics_interval;
- uint8_t graphics_therm_throttle_enable;
- uint8_t graphics_voltage_change_enable;
- uint8_t graphics_clk_slow_enable;
- uint8_t graphics_clk_slow_divider;
-
- uint32_t low_sclk_interrupt_threshold;
- bool uvd_power_gated;
- bool vce_power_gated;
- bool acp_power_gated;
-
- uint32_t active_process_mask;
-
- uint32_t mgcg_cgtt_local0;
- uint32_t mgcg_cgtt_local1;
- uint32_t clock_slow_down_step;
- uint32_t skip_clock_slow_down;
- bool enable_nb_ps_policy;
- uint32_t voting_clients;
- uint32_t voltage_drop_threshold;
- uint32_t gfx_pg_threshold;
- uint32_t max_sclk_level;
- uint32_t max_uvd_level;
- uint32_t max_vce_level;
- /* flags */
- bool didt_enabled;
- bool video_start;
- bool cac_enabled;
- bool bapm_enabled;
- bool nb_dpm_enabled_by_driver;
- bool nb_dpm_enabled;
- bool auto_thermal_throttling_enabled;
- bool dpm_enabled;
- bool need_pptable_upload;
- /* caps */
- bool caps_cac;
- bool caps_power_containment;
- bool caps_sq_ramping;
- bool caps_db_ramping;
- bool caps_td_ramping;
- bool caps_tcp_ramping;
- bool caps_sclk_throttle_low_notification;
- bool caps_fps;
- bool caps_uvd_dpm;
- bool caps_uvd_pg;
- bool caps_vce_dpm;
- bool caps_vce_pg;
- bool caps_acp_dpm;
- bool caps_acp_pg;
- bool caps_stable_power_state;
- bool caps_enable_dfs_bypass;
- bool caps_sclk_ds;
- bool caps_voltage_island;
- /* power state */
- struct amdgpu_ps current_rps;
- struct cz_ps current_ps;
- struct amdgpu_ps requested_rps;
- struct cz_ps requested_ps;
-
- bool uvd_power_down;
- bool vce_power_down;
- bool acp_power_down;
-
- bool uvd_dynamic_pg;
-};
-
-/* cz_smc.c */
-uint32_t cz_get_argument(struct amdgpu_device *adev);
-int cz_send_msg_to_smc(struct amdgpu_device *adev, uint16_t msg);
-int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- uint16_t msg, uint32_t parameter);
-int cz_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address, uint32_t *value, uint32_t limit);
-int cz_smu_upload_pptable(struct amdgpu_device *adev);
-int cz_smu_download_pptable(struct amdgpu_device *adev, void **table);
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
deleted file mode 100644
index aed7033c0973..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ /dev/null
@@ -1,995 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "smu8.h"
-#include "smu8_fusion.h"
-#include "cz_ppsmc.h"
-#include "cz_smumgr.h"
-#include "smu_ucode_xfer_cz.h"
-#include "amdgpu_ucode.h"
-#include "cz_dpm.h"
-#include "vi_dpm.h"
-
-#include "smu/smu_8_0_d.h"
-#include "smu/smu_8_0_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-
-uint32_t cz_get_argument(struct amdgpu_device *adev)
-{
- return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
-}
-
-static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
-{
- struct cz_smu_private_data *priv =
- (struct cz_smu_private_data *)(adev->smu.priv);
-
- return priv;
-}
-
-static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
-{
- int i;
- u32 content = 0, tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
- SMU_MP1_SRBM2P_RESP_0, CONTENT);
- if (content != tmp)
- break;
- udelay(1);
- }
-
- /* a timeout here indicates broken logic */
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
- WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);
-
- return 0;
-}
-
-int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
-{
- int i;
- u32 content = 0, tmp = 0;
-
- if (cz_send_msg_to_smc_async(adev, msg))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
- SMU_MP1_SRBM2P_RESP_0, CONTENT);
- if (content != tmp)
- break;
- udelay(1);
- }
-
- /* a timeout here indicates broken logic */
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- if (tmp != PPSMC_Result_OK) {
- dev_err(adev->dev, "SMC Failed to send Message.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- u16 msg, u32 parameter)
-{
- WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
- return cz_send_msg_to_smc(adev, msg);
-}
-
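The two senders above implement a doorbell/mailbox handshake: write the argument register, clear the response register, post the message, then poll the response until it changes and compare it with PPSMC_Result_OK. A simplified single-threaded mock of the protocol; fake_smu_step() is invented here to stand in for the firmware, while the real code polls hardware registers with udelay(1) between reads:

#include <stdint.h>
#include <stdio.h>

static uint32_t mbox_msg, mbox_arg, mbox_resp;	/* fake register file */

/* pretend firmware: acknowledge any posted message with OK (1) */
static void fake_smu_step(void)
{
	if (mbox_msg)
		mbox_resp = 1;	/* PPSMC_Result_OK */
}

static int send_msg(uint16_t msg, uint32_t param, int timeout)
{
	int i;

	mbox_arg = param;	/* parameter first, as the driver does */
	mbox_resp = 0;		/* clear, so any change is visible */
	mbox_msg = msg;
	for (i = 0; i < timeout; i++) {
		fake_smu_step();
		if (mbox_resp != 0)
			break;
	}
	if (i == timeout)
		return -1;	/* timed out: something is wrong */
	return (mbox_resp == 1) ? 0 : -1;
}

int main(void)
{
	printf("send: %d\n", send_msg(0x41, 300, 100000));	/* 0 */
	return 0;
}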
-static int cz_set_smc_sram_address(struct amdgpu_device *adev,
- u32 smc_address, u32 limit)
-{
- if (smc_address & 3)
- return -EINVAL;
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
-
- return 0;
-}
-
-int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit)
-{
- int ret;
-
- ret = cz_set_smc_sram_address(adev, smc_address, limit);
- if (ret)
- return ret;
-
- *value = RREG32(mmMP0PUB_IND_DATA_0);
-
- return 0;
-}
-
-static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 value, u32 limit)
-{
- int ret;
-
- ret = cz_set_smc_sram_address(adev, smc_address, limit);
- if (ret)
- return ret;
-
- WREG32(mmMP0PUB_IND_DATA_0, value);
-
- return 0;
-}
-
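SRAM behind the SMU is reached through an index/data register pair: the address goes into MP0PUB_IND_INDEX_0, the payload moves through MP0PUB_IND_DATA_0, and every access is first validated for 4-byte alignment and the window limit. A mock of the pattern with an in-memory "SRAM" (everything here is local to the sketch):

#include <stdint.h>
#include <stdio.h>

static uint32_t ind_index;
static uint32_t sram[64];	/* pretend SMU SRAM, dword addressed */

static void wreg_index(uint32_t addr)	{ ind_index = addr; }
static void wreg_data(uint32_t v)	{ sram[ind_index / 4] = v; }
static uint32_t rreg_data(void)		{ return sram[ind_index / 4]; }

/* same checks as cz_set_smc_sram_address(): aligned and inside limit */
static int read_sram_dword(uint32_t addr, uint32_t *val, uint32_t limit)
{
	if ((addr & 3) || (addr + 3) > limit)
		return -1;
	wreg_index(addr);
	*val = rreg_data();
	return 0;
}

int main(void)
{
	uint32_t v;

	wreg_index(16);
	wreg_data(0xdeadbeef);
	if (!read_sram_dword(16, &v, sizeof(sram) - 1))
		printf("0x%08x\n", v);	/* 0xdeadbeef */
	return 0;
}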
-static int cz_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
-
- uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
- cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);
-
- /* prepare TOC buffers */
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_DriverDramAddrHi,
- priv->toc_buffer.mc_addr_high);
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_DriverDramAddrLo,
- priv->toc_buffer.mc_addr_low);
- cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);
-
- /* execute jobs */
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ExecuteJob,
- priv->toc_entry_aram);
-
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ExecuteJob,
- priv->toc_entry_power_profiling_index);
-
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ExecuteJob,
- priv->toc_entry_initialize_index);
-
- return 0;
-}
-
-/*
- * Check whether the firmware has been loaded; the SMU does not
- * report completion until loading has finished.
- */
-static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_mask)
-{
- int i;
- uint32_t index = SMN_MP1_SRAM_START_ADDR +
- SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
- WREG32(mmMP0PUB_IND_INDEX, index);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i >= adev->usec_timeout) {
- dev_err(adev->dev,
- "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
- fw_mask, RREG32(mmMP0PUB_IND_DATA));
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Interface for the individual IP blocks to check firmware loading status:
- * returns 0 on success, nonzero otherwise.
- */
-static int cz_smu_check_finished(struct amdgpu_device *adev,
- enum AMDGPU_UCODE_ID id)
-{
- switch (id) {
- case AMDGPU_UCODE_ID_SDMA0:
- if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
- return 0;
- break;
- case AMDGPU_UCODE_ID_SDMA1:
- if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
- return 0;
- break;
- case AMDGPU_UCODE_ID_CP_CE:
- if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
- return 0;
- break;
- case AMDGPU_UCODE_ID_CP_PFP:
- if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
- return 0;
- break;
- case AMDGPU_UCODE_ID_CP_ME:
- if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
- return 0;
- break;
- case AMDGPU_UCODE_ID_CP_MEC1:
- if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
- return 0;
- break;
- case AMDGPU_UCODE_ID_CP_MEC2:
- if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
- return 0;
- break;
- case AMDGPU_UCODE_ID_RLC_G:
- if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
- return 0;
- break;
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- break;
- }
-
- return 1;
-}
-
-static int cz_load_mec_firmware(struct amdgpu_device *adev)
-{
- struct amdgpu_firmware_info *ucode =
- &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- uint32_t reg_data;
- uint32_t tmp;
-
- if (ucode->fw == NULL)
- return -EINVAL;
-
- /* Disable MEC parsing/prefetching */
- tmp = RREG32(mmCP_MEC_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- WREG32(mmCP_MEC_CNTL, tmp);
-
- tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
- tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
- tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
- tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
- WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);
-
- reg_data = lower_32_bits(ucode->mc_addr) &
- REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
- WREG32(mmCP_CPC_IC_BASE_LO, reg_data);
-
- reg_data = upper_32_bits(ucode->mc_addr) &
- REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
- WREG32(mmCP_CPC_IC_BASE_HI, reg_data);
-
- return 0;
-}
-
-int cz_smu_start(struct amdgpu_device *adev)
-{
- int ret = 0;
-
- uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_JT1_MASK |
- UCODE_ID_CP_MEC_JT2_MASK;
-
- if (adev->asic_type == CHIP_STONEY)
- fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
-
- cz_smu_request_load_fw(adev);
- ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
- if (ret)
- return ret;
-
- /* manually load MEC firmware for CZ */
- if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
- ret = cz_load_mec_firmware(adev);
- if (ret) {
- dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
- return ret;
- }
- }
-
- /* setup fw load flag */
- adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
- AMDGPU_SDMA1_UCODE_LOADED |
- AMDGPU_CPCE_UCODE_LOADED |
- AMDGPU_CPPFP_UCODE_LOADED |
- AMDGPU_CPME_UCODE_LOADED |
- AMDGPU_CPMEC1_UCODE_LOADED |
- AMDGPU_CPMEC2_UCODE_LOADED |
- AMDGPU_CPRLC_UCODE_LOADED;
-
- if (adev->asic_type == CHIP_STONEY)
- adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);
-
- return ret;
-}
-
-static uint32_t cz_convert_fw_type(uint32_t fw_type)
-{
- enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- result = AMDGPU_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = AMDGPU_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = AMDGPU_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = AMDGPU_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = AMDGPU_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- result = AMDGPU_UCODE_ID_CP_MEC1;
- break;
- case UCODE_ID_RLC_G:
- result = AMDGPU_UCODE_ID_RLC_G;
- break;
- default:
- DRM_ERROR("UCode type is out of range!");
- }
-
- return result;
-}
-
-static uint8_t cz_smu_translate_firmware_enum_to_arg(
- enum cz_scratch_entry firmware_enum)
-{
- uint8_t ret = 0;
-
- switch (firmware_enum) {
- case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
- ret = UCODE_ID_SDMA0;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
- ret = UCODE_ID_SDMA1;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
- ret = UCODE_ID_CP_CE;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
- ret = UCODE_ID_CP_PFP;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
- ret = UCODE_ID_CP_ME;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
- ret = UCODE_ID_CP_MEC_JT1;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
- ret = UCODE_ID_CP_MEC_JT2;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
- ret = UCODE_ID_GMCON_RENG;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
- ret = UCODE_ID_RLC_G;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
- ret = UCODE_ID_RLC_SCRATCH;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
- ret = UCODE_ID_RLC_SRM_ARAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
- ret = UCODE_ID_RLC_SRM_DRAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
- ret = UCODE_ID_DMCU_ERAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
- ret = UCODE_ID_DMCU_IRAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
- ret = TASK_ARG_INIT_MM_PWR_LOG;
- break;
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
- case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
- case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
- case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
- ret = TASK_ARG_REG_MMIO;
- break;
- case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
- ret = TASK_ARG_INIT_CLK_TABLE;
- break;
- }
-
- return ret;
-}
-
-static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- enum cz_scratch_entry firmware_enum,
- struct cz_buffer_entry *entry)
-{
- uint64_t gpu_addr;
- uint32_t data_size;
- uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
- enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header;
-
- if (ucode->fw == NULL)
- return -EINVAL;
-
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
- (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
- data_size = le32_to_cpu(header->jt_size) << 2;
- }
-
- entry->mc_addr_low = lower_32_bits(gpu_addr);
- entry->mc_addr_high = upper_32_bits(gpu_addr);
- entry->data_size = data_size;
- entry->firmware_ID = firmware_enum;
-
- return 0;
-}
-
-static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
- enum cz_scratch_entry scratch_type,
- uint32_t size_in_byte,
- struct cz_buffer_entry *entry)
-{
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
- uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
- priv->smu_buffer.mc_addr_low;
-
- /* carve this entry out at the current cursor, then advance it */
- mc_addr += priv->smu_buffer_used_bytes;
- entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
- priv->smu_buffer_used_bytes += size_in_byte;
- entry->data_size = size_in_byte;
- entry->mc_addr_low = lower_32_bits(mc_addr);
- entry->mc_addr_high = upper_32_bits(mc_addr);
- entry->firmware_ID = scratch_type;
-
- return 0;
-}
-
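Scratch entries are carved out of the single pinned smu_buffer with a bump allocator: each entry takes the current cursor as its offset, then the cursor advances by the entry size. A sketch of that allocator; the 32-byte alignment mirrors the ALIGN(..., 32) sizing in cz_smu_init(), and the struct is invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct bump_buf { uint64_t mc_base; uint32_t used; };

static uint64_t suballoc(struct bump_buf *b, uint32_t size, uint32_t align)
{
	uint64_t addr;

	b->used = (b->used + align - 1) & ~(align - 1);	/* align cursor */
	addr = b->mc_base + b->used;			/* hand out offset */
	b->used += size;				/* then advance */
	return addr;
}

int main(void)
{
	struct bump_buf b = { .mc_base = 0x100000, .used = 0 };

	printf("0x%llx\n", (unsigned long long)suballoc(&b, 100, 32)); /* 0x100000 */
	printf("0x%llx\n", (unsigned long long)suballoc(&b, 64, 32));  /* 0x100080 */
	return 0;
}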
-static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
- enum cz_scratch_entry firmware_enum,
- bool is_last)
-{
- uint8_t i;
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
- struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
- struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
-
- task->type = TASK_TYPE_UCODE_LOAD;
- task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
- task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
-
- for (i = 0; i < priv->driver_buffer_length; i++)
- if (priv->driver_buffer[i].firmware_ID == firmware_enum)
- break;
-
- if (i >= priv->driver_buffer_length) {
- dev_err(adev->dev, "Invalid Firmware Type\n");
- return -EINVAL;
- }
-
- task->addr.low = priv->driver_buffer[i].mc_addr_low;
- task->addr.high = priv->driver_buffer[i].mc_addr_high;
- task->size_bytes = priv->driver_buffer[i].data_size;
-
- return 0;
-}
-
-static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
- enum cz_scratch_entry firmware_enum,
- uint8_t type, bool is_last)
-{
- uint8_t i;
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
- struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
- struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
-
- task->type = type;
- task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
- task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
-
- for (i = 0; i < priv->scratch_buffer_length; i++)
- if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
- break;
-
- if (i >= priv->scratch_buffer_length) {
- dev_err(adev->dev, "Invalid Firmware Type\n");
- return -EINVAL;
- }
-
- task->addr.low = priv->scratch_buffer[i].mc_addr_low;
- task->addr.high = priv->scratch_buffer[i].mc_addr_high;
- task->size_bytes = priv->scratch_buffer[i].data_size;
-
- if (firmware_enum == CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS) {
- struct cz_ih_meta_data *pIHReg_restore =
- (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
- pIHReg_restore->command =
- METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
- }
-
- return 0;
-}
-
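Each populate-task helper above appends one SMU_Task at toc_entry_used_count and points it at the next slot, so a job is a linked chain through the TOC array terminated by END_OF_TASK_LIST. A compact model of that chaining and of how the firmware would walk it (the struct and constants are stand-ins, not the real SMU8 layout):

#include <stdint.h>
#include <stdio.h>

#define END_OF_LIST 0xff

struct task {
	uint8_t type;	/* what to do */
	uint8_t next;	/* index of the next task, or END_OF_LIST */
};

int main(void)
{
	struct task toc[8];
	uint8_t used = 0;

	/* populate a two-task job; only the last task ends the chain */
	toc[used] = (struct task){ .type = 1, .next = (uint8_t)(used + 1) };
	used++;
	toc[used] = (struct task){ .type = 2, .next = END_OF_LIST };
	used++;

	/* walk the chain the way the firmware would */
	for (uint8_t i = 0; i != END_OF_LIST; i = toc[i].next)
		printf("run task type %u\n", toc[i].type);
	return 0;
}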
-static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
-{
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
- priv->toc_entry_aram = priv->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- TASK_TYPE_UCODE_SAVE, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
-{
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
- struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
-
- toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- TASK_TYPE_UCODE_SAVE, false);
- cz_smu_populate_single_scratch_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- TASK_TYPE_UCODE_SAVE, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
-{
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
- struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
-
- toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;
-
- /* populate ucode */
- if (adev->firmware.smu_load) {
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (adev->asic_type == CHIP_STONEY) {
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- } else {
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- }
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
- }
-
- /* populate scratch */
- cz_smu_populate_single_scratch_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- TASK_TYPE_UCODE_LOAD, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
-{
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
-
- priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
-
- cz_smu_populate_single_scratch_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- TASK_TYPE_INITIALIZE, true);
- return 0;
-}
-
-static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
-{
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
-
- priv->toc_entry_initialize_index = priv->toc_entry_used_count;
-
- if (adev->firmware.smu_load) {
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (adev->asic_type == CHIP_STONEY) {
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- } else {
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
- }
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (adev->asic_type == CHIP_STONEY) {
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- } else {
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- }
- cz_smu_populate_single_ucode_load_task(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
- }
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
-{
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
-
- priv->toc_entry_clock_table = priv->toc_entry_used_count;
-
- cz_smu_populate_single_scratch_task(adev,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
- TASK_TYPE_INITIALIZE, true);
-
- return 0;
-}
-
-static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
-{
- int i;
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
- struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
-
- for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
- toc->JobList[i] = (uint8_t)IGNORE_JOB;
-
- return 0;
-}
-
-/*
- * cz smu uninitialization
- */
-int cz_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.smu_load)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
-
-int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
-{
- uint8_t i;
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
-
- for (i = 0; i < priv->scratch_buffer_length; i++)
- if (priv->scratch_buffer[i].firmware_ID ==
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
- break;
-
- if (i >= priv->scratch_buffer_length) {
- dev_err(adev->dev, "Invalid Scratch Type\n");
- return -EINVAL;
- }
-
- *table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;
-
- /* prepare buffer for pptable */
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetClkTableAddrHi,
- priv->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetClkTableAddrLo,
- priv->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ExecuteJob,
- priv->toc_entry_clock_table);
-
- /* actual downloading */
- cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);
-
- return 0;
-}
-
-int cz_smu_upload_pptable(struct amdgpu_device *adev)
-{
- uint8_t i;
- struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
-
- for (i = 0; i < priv->scratch_buffer_length; i++)
- if (priv->scratch_buffer[i].firmware_ID ==
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
- break;
-
- if (i >= priv->scratch_buffer_length) {
- dev_err(adev->dev, "Invalid Scratch Type\n");
- return -EINVAL;
- }
-
- /* prepare SMU */
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetClkTableAddrHi,
- priv->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetClkTableAddrLo,
- priv->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ExecuteJob,
- priv->toc_entry_clock_table);
-
- /* actual uploading */
- cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);
-
- return 0;
-}
-
-/*
- * cz smumgr functions initialization
- */
-static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
- .check_fw_load_finish = cz_smu_check_finished,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-/*
- * cz smu initialization
- */
-int cz_smu_init(struct amdgpu_device *adev)
-{
- int ret = -EINVAL;
- uint64_t mc_addr = 0;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
- void *toc_buf_ptr = NULL;
- void *smu_buf_ptr = NULL;
-
- struct cz_smu_private_data *priv =
- kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
- if (priv == NULL)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = priv;
- adev->smu.fw_flags = 0;
- priv->toc_buffer.data_size = 4096;
-
- priv->smu_buffer.data_size =
- ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
- ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
- ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
- ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
- ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
-
- /* prepare toc buffer and smu buffer:
- * 1. create amdgpu_bo for toc buffer and smu buffer
- * 2. pin mc address
- * 3. map kernel virtual address
- */
- ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- toc_buf);
-
- if (ret) {
- dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
- return ret;
- }
-
- ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- smu_buf);
-
- if (ret) {
- dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
- return ret;
- }
-
- /* toc buffer reserve/pin/map */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
- return ret;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
- return ret;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret)
- goto smu_init_failed;
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
-
- priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
- priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
- priv->toc_buffer.kaddr = toc_buf_ptr;
-
- /* smu buffer reserve/pin/map */
- ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.smu_buf);
- dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
- return ret;
- }
-
- ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
- return ret;
- }
-
- ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
- if (ret)
- goto smu_init_failed;
-
- amdgpu_bo_unreserve(adev->smu.smu_buf);
-
- priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
- priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
- priv->smu_buffer.kaddr = smu_buf_ptr;
-
- if (adev->firmware.smu_load) {
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
-
- if (adev->asic_type == CHIP_STONEY) {
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- } else {
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- }
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- if (adev->asic_type == CHIP_STONEY) {
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- } else {
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- }
- if (cz_smu_populate_single_firmware_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
- &priv->driver_buffer[priv->driver_buffer_length++]))
- goto smu_init_failed;
- }
-
- if (cz_smu_populate_single_scratch_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
- &priv->scratch_buffer[priv->scratch_buffer_length++]))
- goto smu_init_failed;
- if (cz_smu_populate_single_scratch_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
- &priv->scratch_buffer[priv->scratch_buffer_length++]))
- goto smu_init_failed;
- if (cz_smu_populate_single_scratch_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
- &priv->scratch_buffer[priv->scratch_buffer_length++]))
- goto smu_init_failed;
- if (cz_smu_populate_single_scratch_entry(adev,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- sizeof(struct SMU8_MultimediaPowerLogData),
- &priv->scratch_buffer[priv->scratch_buffer_length++]))
- goto smu_init_failed;
- if (cz_smu_populate_single_scratch_entry(adev,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
- sizeof(struct SMU8_Fusion_ClkTable),
- &priv->scratch_buffer[priv->scratch_buffer_length++]))
- goto smu_init_failed;
-
- cz_smu_initialize_toc_empty_job_list(adev);
- cz_smu_construct_toc_for_rlc_aram_save(adev);
- cz_smu_construct_toc_for_vddgfx_enter(adev);
- cz_smu_construct_toc_for_vddgfx_exit(adev);
- cz_smu_construct_toc_for_power_profiling(adev);
- cz_smu_construct_toc_for_bootup(adev);
- cz_smu_construct_toc_for_clock_table(adev);
- /* init the smumgr functions */
- adev->smu.smumgr_funcs = &cz_smumgr_funcs;
-
- return 0;
-
-smu_init_failed:
- amdgpu_bo_unref(toc_buf);
- amdgpu_bo_unref(smu_buf);
-
- return ret;
-}
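For reference, the deleted cz_smu_init() above walks each buffer through create -> reserve -> pin -> kmap -> unreserve, and each failure path must unwind only the steps already taken. A minimal standalone sketch of that ordering and unwind logic, assuming hypothetical bo_* helpers in place of the real amdgpu_bo_* calls:

/* Standalone model of the create/reserve/pin/kmap bring-up used by
 * cz_smu_init(). The bo_* helpers are fake stand-ins for the amdgpu_bo_*
 * API; only the ordering and the goto-based unwind is the point. */
#include <stdint.h>
#include <stdio.h>

static int bo_create(void)  { return 0; }	/* amdgpu_bo_create()  */
static int bo_reserve(void) { return 0; }	/* amdgpu_bo_reserve() */
static int bo_pin(uint64_t *mc) { *mc = 0x1234000; return 0; }
static int bo_kmap(void **va) { static char page[4096]; *va = page; return 0; }
static void bo_unreserve(void) { }
static void bo_unpin(void) { }
static void bo_unref(void) { }

static int smu_buffer_init(void)
{
	uint64_t mc = 0;
	void *va = NULL;
	int ret;

	ret = bo_create();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = bo_reserve();
	if (ret)
		goto err_unref;		/* undo create */

	ret = bo_pin(&mc);
	if (ret)
		goto err_unreserve;	/* undo reserve, then create */

	ret = bo_kmap(&va);
	if (ret)
		goto err_unpin;		/* undo pin, reserve, create */

	bo_unreserve();			/* success: keep pin + mapping */
	printf("mapped at %p, mc 0x%llx\n", va, (unsigned long long)mc);
	return 0;

err_unpin:
	bo_unpin();
err_unreserve:
	bo_unreserve();
err_unref:
	bo_unref();
	return ret;
}

int main(void)
{
	return smu_buffer_init();
}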
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
deleted file mode 100644
index 026342fcf0f3..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __CZ_SMC_H__
-#define __CZ_SMC_H__
-
-#define MAX_NUM_FIRMWARE 8
-#define MAX_NUM_SCRATCH 11
-#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
-#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
-#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024
-#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4)
-
-enum cz_scratch_entry {
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
- CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
- CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
- CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
- CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
-};
-
-struct cz_buffer_entry {
- uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
- void *kaddr;
- enum cz_scratch_entry firmware_ID;
-};
-
-struct cz_register_index_data_pair {
- uint32_t offset;
- uint32_t value;
-};
-
-struct cz_ih_meta_data {
- uint32_t command;
- struct cz_register_index_data_pair register_index_value_pair[1];
-};
-
-struct cz_smu_private_data {
- uint8_t driver_buffer_length;
- uint8_t scratch_buffer_length;
- uint16_t toc_entry_used_count;
- uint16_t toc_entry_initialize_index;
- uint16_t toc_entry_power_profiling_index;
- uint16_t toc_entry_aram;
- uint16_t toc_entry_ih_register_restore_task_index;
- uint16_t toc_entry_clock_table;
- uint16_t ih_register_restore_task_size;
- uint16_t smu_buffer_used_bytes;
-
- struct cz_buffer_entry toc_buffer;
- struct cz_buffer_entry smu_buffer;
- struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
- struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
-};
-
-#endif
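The download/upload pptable paths in the deleted cz_smumgr.c looked an entry up in scratch_buffer[] by firmware_ID and failed with -EINVAL on a miss. A standalone sketch of that lookup pattern, with simplified copies of the types from the header above (find_scratch() is illustrative, not a kernel helper):

/* Standalone sketch of the scratch-entry lookup: linear-search a small
 * fixed array by ID, -EINVAL on a miss (the driver logs "Invalid
 * Scratch Type" there). */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum scratch_id { ID_RLC_SCRATCH, ID_POWER_PROFILING, ID_FUSION_CLKTABLE };

struct buffer_entry {
	enum scratch_id firmware_ID;
	void *kaddr;
};

static int find_scratch(const struct buffer_entry *tbl, uint8_t len,
			enum scratch_id id)
{
	uint8_t i;

	for (i = 0; i < len; i++)
		if (tbl[i].firmware_ID == id)
			return i;

	return -EINVAL;
}

int main(void)
{
	static char clktable[64];
	struct buffer_entry tbl[] = {
		{ ID_RLC_SCRATCH, NULL },
		{ ID_FUSION_CLKTABLE, clktable },
	};
	int idx = find_scratch(tbl, 2, ID_FUSION_CLKTABLE);

	if (idx < 0)
		return 1;
	printf("clktable kaddr = %p\n", tbl[idx].kaddr);
	return 0;
}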
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ccb5e02e7b20..d4452d8f76ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2072,7 +2072,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- switch (target_fb->pixel_format) {
+ switch (target_fb->format->format) {
case DRM_FORMAT_C8:
fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
@@ -2145,7 +2145,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format, &format_name));
+ drm_get_format_name(target_fb->format->format, &format_name));
return -EINVAL;
}
@@ -2220,7 +2220,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
dce_v10_0_grph_enable(crtc, true);
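The conversion above replaces the per-framebuffer bits_per_pixel arithmetic with the format's cached cpp (bytes per pixel, per plane). A standalone sketch of the same pitch computation, with a simplified format_info standing in for struct drm_format_info:

/* Standalone sketch of pitch-in-pixels after the drm_framebuffer rework:
 * pitches[] is in bytes, cpp[0] is bytes per pixel for plane 0. */
#include <stdint.h>
#include <stdio.h>

struct format_info {
	uint8_t cpp[4];			/* bytes per pixel, per plane */
};

struct framebuffer {
	const struct format_info *format;
	unsigned int pitches[4];	/* bytes per scanline, per plane */
};

int main(void)
{
	static const struct format_info xrgb8888 = { .cpp = { 4 } };
	struct framebuffer fb = { .format = &xrgb8888,
				  .pitches = { 1920 * 4 } };

	/* old: pitches[0] / (bits_per_pixel / 8); new: */
	unsigned int fb_pitch_pixels = fb.pitches[0] / fb.format->cpp[0];

	printf("pitch = %u pixels\n", fb_pitch_pixels);	/* 1920 */
	return 0;
}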
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a7af5b33a5e3..5b24e89552ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2056,7 +2056,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- switch (target_fb->pixel_format) {
+ switch (target_fb->format->format) {
case DRM_FORMAT_C8:
fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
@@ -2129,7 +2129,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format, &format_name));
+ drm_get_format_name(target_fb->format->format, &format_name));
return -EINVAL;
}
@@ -2204,7 +2204,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
dce_v11_0_grph_enable(crtc, true);
@@ -3737,9 +3737,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
default:
encoder->possible_crtcs = 0x3;
break;
+ case 3:
+ encoder->possible_crtcs = 0x7;
+ break;
case 4:
encoder->possible_crtcs = 0xf;
break;
+ case 5:
+ encoder->possible_crtcs = 0x1f;
+ break;
case 6:
encoder->possible_crtcs = 0x3f;
break;
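Each case above sets possible_crtcs to a mask with the low num_crtc bits set; the 3- and 5-crtc cases were simply missing before this fix. The whole switch is equivalent to one expression, sketched standalone below (the driver keeps the switch, presumably for its explicit default handling):

/* Standalone sketch: possible_crtcs is just the low num_crtc bits,
 * so 2 -> 0x3, 3 -> 0x7, 4 -> 0xf, 5 -> 0x1f, 6 -> 0x3f. */
#include <stdio.h>

static unsigned int possible_crtcs(unsigned int num_crtc)
{
	return (1u << num_crtc) - 1;
}

int main(void)
{
	unsigned int n;

	for (n = 2; n <= 6; n++)
		printf("%u crtcs -> 0x%x\n", n, possible_crtcs(n));
	return 0;
}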
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 39df6a50637f..809aa94a0cc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1501,7 +1501,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
- switch (target_fb->pixel_format) {
+ switch (target_fb->format->format) {
case DRM_FORMAT_C8:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
GRPH_FORMAT(GRPH_FORMAT_INDEXED));
@@ -1567,7 +1567,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format, &format_name));
+ drm_get_format_name(target_fb->format->format, &format_name));
return -EINVAL;
}
@@ -1630,7 +1630,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
dce_v6_0_grph_enable(crtc, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 28102bb1704d..d2590d75aa11 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1950,7 +1950,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- switch (target_fb->pixel_format) {
+ switch (target_fb->format->format) {
case DRM_FORMAT_C8:
fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
(GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
@@ -2016,7 +2016,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format, &format_name));
+ drm_get_format_name(target_fb->format->format, &format_name));
return -EINVAL;
}
@@ -2079,7 +2079,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
dce_v8_0_grph_enable(crtc, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index b323f5ef64d2..2086e7e68de4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -25,7 +25,7 @@
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
-#include "si/clearstate_si.h"
+#include "clearstate_si.h"
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
@@ -1325,21 +1325,19 @@ static u32 gfx_v6_0_create_bitmask(u32 bit_width)
return (u32)(((u64)1 << bit_width) - 1);
}
-static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
- u32 max_rb_num_per_se,
- u32 sh_per_se)
+static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(mmCC_RB_BACKEND_DISABLE);
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ data = RREG32(mmCC_RB_BACKEND_DISABLE) |
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
- data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+ data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
- mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+ mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_backends_per_se/
+ adev->gfx.config.max_sh_per_se);
- return data & mask;
+ return ~data & mask;
}
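The rework above turns the old "disabled" query into an "active" one: OR the two disable registers together, extract the field, invert, and mask down to the backend width. A standalone sketch with fake register values:

/* Standalone sketch of the active-bitmap derivation: combine both
 * disable sources, invert, mask to max_backends_per_se /
 * max_sh_per_se bits. Values are faked constants. */
#include <stdint.h>
#include <stdio.h>

static uint32_t bitmask(uint32_t width)
{
	return (uint32_t)(((uint64_t)1 << width) - 1);
}

int main(void)
{
	uint32_t cc_disable   = 0x2;	/* fake CC_RB_BACKEND_DISABLE field */
	uint32_t user_disable = 0x4;	/* fake GC_USER_RB_BACKEND_DISABLE  */
	uint32_t width = 4;		/* backends per SH on this fake part */
	uint32_t active = ~(cc_disable | user_disable) & bitmask(width);

	printf("active rb bitmap = 0x%x\n", active);	/* 0x9 */
	return 0;
}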
static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
@@ -1468,68 +1466,55 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}
-static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
- u32 se_num, u32 sh_per_se,
- u32 max_rb_num_per_se)
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
- u32 data, mask;
- u32 disabled_rbs = 0;
- u32 enabled_rbs = 0;
+ u32 data;
+ u32 raster_config = 0;
+ u32 active_rbs = 0;
+ u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se;
unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < se_num; i++) {
- for (j = 0; j < sh_per_se; j++) {
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
- data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
- disabled_rbs |= data << ((i * sh_per_se + j) * 2);
+ data = gfx_v6_0_get_rb_active_bitmap(adev);
+ active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+ rb_bitmap_width_per_sh);
}
}
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- mask = 1;
- for (i = 0; i < max_rb_num_per_se * se_num; i++) {
- if (!(disabled_rbs & mask))
- enabled_rbs |= mask;
- mask <<= 1;
- }
- adev->gfx.config.backend_enable_mask = enabled_rbs;
- adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+ adev->gfx.config.backend_enable_mask = active_rbs;
+ adev->gfx.config.num_rbs = hweight32(active_rbs);
num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines, 16);
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < se_num; i++) {
- gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
- data = 0;
- for (j = 0; j < sh_per_se; j++) {
- switch (enabled_rbs & 3) {
- case 1:
- data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
- break;
- case 2:
- data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
- break;
- case 3:
- default:
- data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
- break;
- }
- enabled_rbs >>= 2;
- }
- gfx_v6_0_raster_config(adev, &data);
+ gfx_v6_0_raster_config(adev, &raster_config);
- if (!adev->gfx.config.backend_enable_mask ||
- adev->gfx.config.num_rbs >= num_rb_pipes)
- WREG32(mmPA_SC_RASTER_CONFIG, data);
- else
- gfx_v6_0_write_harvested_raster_configs(adev, data,
- adev->gfx.config.backend_enable_mask,
- num_rb_pipes);
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes) {
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+ } else {
+ gfx_v6_0_write_harvested_raster_configs(adev, raster_config,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ }
}
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
@@ -1540,36 +1525,44 @@ static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
}
*/
-static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
{
- u32 data, mask;
+ u32 data;
+
+ if (!bitmap)
+ return;
- data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
- data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
- data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
- data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
- mask = gfx_v6_0_create_bitmask(cu_per_sh);
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
+{
+ u32 data, mask;
- return ~data & mask;
+ data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+ RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
+
+ mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
+ return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}
-static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
- u32 se_num, u32 sh_per_se,
- u32 cu_per_sh)
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
{
int i, j, k;
u32 data, mask;
u32 active_cu = 0;
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < se_num; i++) {
- for (j = 0; j < sh_per_se; j++) {
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
- active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+ active_cu = gfx_v6_0_get_cu_enabled(adev);
mask = 1;
for (k = 0; k < 16; k++) {
@@ -1717,6 +1710,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
break;
}
+ gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK;
+ if (adev->gfx.config.max_shader_engines == 2)
+ gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT;
adev->gfx.config.gb_addr_config = gb_addr_config;
WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
@@ -1735,13 +1731,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
#endif
gfx_v6_0_tiling_mode_table_init(adev);
- gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
- adev->gfx.config.max_sh_per_se,
- adev->gfx.config.max_backends_per_se);
+ gfx_v6_0_setup_rb(adev);
- gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
- adev->gfx.config.max_sh_per_se,
- adev->gfx.config.max_cu_per_sh);
+ gfx_v6_0_setup_spi(adev);
gfx_v6_0_get_cu_info(adev);
@@ -1794,14 +1786,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
{
- int i;
-
adev->gfx.scratch.num_reg = 7;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
- for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
- adev->gfx.scratch.free[i] = true;
- adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
- }
+ adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
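The scratch rework above collapses the per-register free[] array into a single free_mask word. Allocation then becomes find-first-set-and-clear, and freeing sets the bit back. A standalone sketch of the presumed alloc/free pair (the driver's actual helpers live elsewhere in amdgpu; these names are illustrative):

/* Standalone sketch of a free_mask scratch allocator modelling the
 * rework above: alloc claims the lowest set bit, free returns it. */
#include <stdint.h>
#include <stdio.h>

struct scratch {
	uint32_t free_mask;
	uint32_t reg_base;
};

static int scratch_alloc(struct scratch *s, uint32_t *reg)
{
	uint32_t i;

	for (i = 0; i < 32; i++) {
		if (s->free_mask & (1u << i)) {
			s->free_mask &= ~(1u << i);	/* claim it */
			*reg = s->reg_base + i;
			return 0;
		}
	}
	return -1;	/* none free */
}

static void scratch_free(struct scratch *s, uint32_t reg)
{
	s->free_mask |= 1u << (reg - s->reg_base);
}

int main(void)
{
	struct scratch s = { .free_mask = (1u << 7) - 1, .reg_base = 0x3040 };
	uint32_t reg;

	if (!scratch_alloc(&s, &reg))
		printf("got scratch reg 0x%x\n", reg);
	scratch_free(&s, reg);
	return 0;
}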
static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
@@ -1975,7 +1962,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
@@ -2946,61 +2933,16 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
}
}
-static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
- u32 se, u32 sh)
-{
-
- u32 mask = 0, tmp, tmp1;
- int i;
-
- mutex_lock(&adev->grbm_idx_mutex);
- gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
- tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
- tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- tmp &= 0xffff0000;
-
- tmp |= tmp1;
- tmp >>= 16;
-
- for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
- mask <<= 1;
- mask |= 1;
- }
-
- return (~tmp) & mask;
-}
-
static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
- u32 i, j, k, active_cu_number = 0;
-
- u32 mask, counter, cu_bitmap;
- u32 tmp = 0;
-
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- mask = 1;
- cu_bitmap = 0;
- counter = 0;
- for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
- if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
- if (counter < 2)
- cu_bitmap |= mask;
- counter++;
- }
- mask <<= 1;
- }
+ u32 tmp;
- active_cu_number += counter;
- tmp |= (cu_bitmap << (i * 16 + j * 8));
- }
- }
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
- WREG32(mmRLC_PG_AO_CU_MASK, tmp);
- WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number);
+ tmp = RREG32(mmRLC_MAX_PG_CU);
+ tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
+ tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
+ WREG32(mmRLC_MAX_PG_CU, tmp);
}
static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
@@ -3775,18 +3717,26 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
+ mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v6_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
+ bitmap = gfx_v6_0_get_cu_enabled(adev);
cu_info->bitmap[i][j] = bitmap;
- for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+ for (k = 0; k < 16; k++) {
if (bitmap & mask) {
if (counter < 2)
ao_bitmap |= mask;
@@ -3799,6 +3749,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
}
}
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index c4e14015ec5b..1f9354541f29 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1983,6 +1983,14 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
+
+ tmp = RREG32(mmSPI_ARB_PRIORITY);
+ tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
+ tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
+ tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
+ tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
+ WREG32(mmSPI_ARB_PRIORITY, tmp);
+
mutex_unlock(&adev->grbm_idx_mutex);
udelay(50);
@@ -2003,14 +2011,9 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
*/
static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
{
- int i;
-
adev->gfx.scratch.num_reg = 7;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
- for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
- adev->gfx.scratch.free[i] = true;
- adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
- }
+ adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
/**
@@ -2321,7 +2324,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 373374164bd5..67afc901905c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -657,6 +657,8 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
+static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t addr);
+static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t addr);
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
@@ -749,14 +751,9 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
{
- int i;
-
adev->gfx.scratch.num_reg = 7;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
- for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
- adev->gfx.scratch.free[i] = true;
- adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
- }
+ adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
@@ -829,7 +826,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
@@ -941,6 +938,13 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+
+ /* chain ib ucode isn't formally released; disable it for now.
+ * TODO: when the ucode is ready, use the ucode version to judge
+ * whether chain-ib is supported or not.
+ */
+ adev->virt.chained_ib_support = false;
+
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
@@ -1367,6 +1371,51 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
}
}
+static int gfx_v8_0_kiq_init_ring(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq)
+{
+ int r = 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+ if (r)
+ return r;
+ }
+
+ ring->adev = NULL;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+ ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+ if (adev->gfx.mec2_fw) {
+ ring->me = 2;
+ ring->pipe = 0;
+ } else {
+ ring->me = 1;
+ ring->pipe = 1;
+ }
+
+ irq->data = ring;
+ ring->queue = 0;
+ sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
+
+ return r;
+}
+
+static void gfx_v8_0_kiq_free_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq)
+{
+ if (amdgpu_sriov_vf(ring->adev))
+ amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+
+ amdgpu_ring_fini(ring);
+ irq->data = NULL;
+}
+
#define MEC_HPD_SIZE 2048
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
@@ -1421,6 +1470,35 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
return 0;
}
+static void gfx_v8_0_kiq_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+ amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
+ kiq->eop_obj = NULL;
+}
+
+static int gfx_v8_0_kiq_init(struct amdgpu_device *adev)
+{
+ int r;
+ u32 *hpd;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+ r = amdgpu_bo_create_kernel(adev, MEC_HPD_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
+ &kiq->eop_gpu_addr, (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
+ return r;
+ }
+
+ memset(hpd, 0, MEC_HPD_SIZE);
+
+ amdgpu_bo_kunmap(kiq->eop_obj);
+
+ return 0;
+}
+
static const u32 vgpr_init_compute_shader[] =
{
0x7e000209, 0x7e020208,
@@ -1702,7 +1780,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* schedule the ib on the ring */
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r) {
DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
goto fail;
@@ -1997,8 +2075,14 @@ static int gfx_v8_0_sw_init(void *handle)
{
int i, r;
struct amdgpu_ring *ring;
+ struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* KIQ event */
+ r = amdgpu_irq_add_id(adev, 178, &adev->gfx.kiq.irq);
+ if (r)
+ return r;
+
/* EOP Event */
r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
if (r)
@@ -2036,6 +2120,17 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
+ r = gfx_v8_0_kiq_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init KIQ BOs!\n");
+ return r;
+ }
+
+ kiq = &adev->gfx.kiq;
+ r = gfx_v8_0_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ if (r)
+ return r;
+
/* set up the gfx ring */
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
@@ -2119,7 +2214,9 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+ gfx_v8_0_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ gfx_v8_0_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
gfx_v8_0_free_microcode(adev);
@@ -3801,6 +3898,14 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_earlyz_tile_fifo_size <<
PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
+
+ tmp = RREG32(mmSPI_ARB_PRIORITY);
+ tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
+ tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
+ tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
+ tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
+ WREG32(mmSPI_ARB_PRIORITY, tmp);
+
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -4024,17 +4129,6 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v8_0_init_power_gating(adev);
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
- if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
- cz_enable_sck_slow_down_on_power_up(adev, true);
- cz_enable_sck_slow_down_on_power_down(adev, true);
- } else {
- cz_enable_sck_slow_down_on_power_up(adev, false);
- cz_enable_sck_slow_down_on_power_down(adev, false);
- }
- if (adev->pg_flags & AMD_PG_SUPPORT_CP)
- cz_enable_cp_power_gating(adev, true);
- else
- cz_enable_cp_power_gating(adev, false);
} else if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
gfx_v8_0_init_csb(adev);
@@ -4506,6 +4600,393 @@ static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev)
}
}
+/* KIQ functions */
+static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
+{
+ uint32_t tmp;
+ struct amdgpu_device *adev = ring->adev;
+
+ /* tell RLC which queue is the KIQ */
+ tmp = RREG32(mmRLC_CP_SCHEDULERS);
+ tmp &= 0xffffff00;
+ tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
+ WREG32(mmRLC_CP_SCHEDULERS, tmp);
+ tmp |= 0x80;
+ WREG32(mmRLC_CP_SCHEDULERS, tmp);
+}
+
+static void gfx_v8_0_kiq_enable(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_alloc(ring, 8);
+ /* set resources */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+ amdgpu_ring_write(ring, 0); /* vmid_mask:0 queue_type:0 (KIQ) */
+ amdgpu_ring_write(ring, 0x000000FF); /* queue mask lo */
+ amdgpu_ring_write(ring, 0); /* queue mask hi */
+ amdgpu_ring_write(ring, 0); /* gws mask lo */
+ amdgpu_ring_write(ring, 0); /* gws mask hi */
+ amdgpu_ring_write(ring, 0); /* oac mask */
+ amdgpu_ring_write(ring, 0); /* gds heap base:0, gds heap size:0 */
+ amdgpu_ring_commit(ring);
+ udelay(50);
+}
+
+static void gfx_v8_0_map_queue_enable(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint64_t mqd_addr, wptr_addr;
+
+ mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ amdgpu_ring_alloc(kiq_ring, 8);
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ /* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
+ amdgpu_ring_write(kiq_ring, 0x21010000);
+ amdgpu_ring_write(kiq_ring, (ring->doorbell_index << 2) |
+ (ring->queue << 26) |
+ (ring->pipe << 29) |
+ ((ring->me == 1 ? 0 : 1) << 31)); /* doorbell */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+ amdgpu_ring_commit(kiq_ring);
+ udelay(50);
+}
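The MAP_QUEUES doorbell dword above packs four fields into one word: the doorbell offset from bit 2, the queue at bit 26, the pipe at bit 29, and an engine select at bit 31. A standalone sketch of that packing, with the field positions taken directly from the code above:

/* Standalone sketch of the MAP_QUEUES doorbell dword packing:
 * doorbell_index << 2 | queue << 26 | pipe << 29 |
 * (me == 1 ? 0 : 1) << 31. */
#include <stdint.h>
#include <stdio.h>

static uint32_t map_queues_doorbell(uint32_t doorbell_index, uint32_t queue,
				    uint32_t pipe, uint32_t me)
{
	return (doorbell_index << 2) |
	       (queue << 26) |
	       (pipe << 29) |
	       ((me == 1 ? 0u : 1u) << 31);
}

int main(void)
{
	printf("dword = 0x%08x\n", map_queues_doorbell(16, 0, 1, 1));
	return 0;
}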
+
+static int gfx_v8_0_mqd_init(struct amdgpu_device *adev,
+ struct vi_mqd *mqd,
+ uint64_t mqd_gpu_addr,
+ uint64_t eop_gpu_addr,
+ struct amdgpu_ring *ring)
+{
+ uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
+ uint32_t tmp;
+
+ mqd->header = 0xC0310800;
+ mqd->compute_pipelinestat_enable = 0x00000001;
+ mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
+ mqd->compute_misc_reserved = 0x00000003;
+
+ eop_base_addr = eop_gpu_addr >> 8;
+ mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
+ mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ tmp = RREG32(mmCP_HQD_EOP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+ (order_base_2(MEC_HPD_SIZE / 4) - 1));
+
+ mqd->cp_hqd_eop_control = tmp;
+
+ /* enable doorbell? */
+ tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+
+ if (ring->use_doorbell)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 1);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 0);
+
+ mqd->cp_hqd_pq_doorbell_control = tmp;
+
+ /* disable the queue if it's active */
+ mqd->cp_hqd_dequeue_request = 0;
+ mqd->cp_hqd_pq_rptr = 0;
+ mqd->cp_hqd_pq_wptr = 0;
+
+ /* set the pointer to the MQD */
+ mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
+ mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
+
+ /* set MQD vmid to 0 */
+ tmp = RREG32(mmCP_MQD_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+ mqd->cp_mqd_control = tmp;
+
+ /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
+ hqd_gpu_addr = ring->gpu_addr >> 8;
+ mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+ mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+
+ /* set up the HQD, this is similar to CP_RB0_CNTL */
+ tmp = RREG32(mmCP_HQD_PQ_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+ (order_base_2(ring->ring_size / 4) - 1));
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+ ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+#ifdef __BIG_ENDIAN
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+#endif
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ mqd->cp_hqd_pq_control = tmp;
+
+ /* set the wb address whether it's enabled or not */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_rptr_report_addr_hi =
+ upper_32_bits(wb_gpu_addr) & 0xffff;
+
+ /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+
+ tmp = 0;
+ /* enable the doorbell if requested */
+ if (ring->use_doorbell) {
+ tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_OFFSET, ring->doorbell_index);
+
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_SOURCE, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_HIT, 0);
+ }
+
+ mqd->cp_hqd_pq_doorbell_control = tmp;
+
+ /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+ ring->wptr = 0;
+ mqd->cp_hqd_pq_wptr = ring->wptr;
+ mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
+
+ /* set the vmid for the queue */
+ mqd->cp_hqd_vmid = 0;
+
+ tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
+ mqd->cp_hqd_persistent_state = tmp;
+
+ /* activate the queue */
+ mqd->cp_hqd_active = 1;
+
+ return 0;
+}
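The size fields programmed in gfx_v8_0_mqd_init() above store log2 of the size in dwords, minus one (the register value encodes 2^(field+1) dwords). A standalone sketch of that encoding; order2() mimics the kernel's order_base_2():

/* Standalone sketch of the EOP_CONTROL/PQ_CONTROL size encoding:
 * field = order_base_2(size_bytes / 4) - 1. */
#include <stdint.h>
#include <stdio.h>

static uint32_t order2(uint32_t n)	/* smallest x with 2^x >= n */
{
	uint32_t x = 0;

	while ((1u << x) < n)
		x++;
	return x;
}

int main(void)
{
	uint32_t mec_hpd_size = 2048;			/* bytes */
	uint32_t eop_size = order2(mec_hpd_size / 4) - 1;

	/* 2048 bytes = 512 dwords = 2^9, so the field holds 8 */
	printf("EOP_SIZE field = %u\n", eop_size);
	return 0;
}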
+
+static int gfx_v8_0_kiq_init_register(struct amdgpu_device *adev,
+ struct vi_mqd *mqd,
+ struct amdgpu_ring *ring)
+{
+ uint32_t tmp;
+ int j;
+
+ /* disable wptr polling */
+ tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
+ WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
+
+ WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo);
+ WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ WREG32(mmCP_HQD_EOP_CONTROL, mqd->cp_hqd_eop_control);
+
+ /* enable doorbell? */
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
+
+ /* disable the queue if it's active */
+ if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
+ WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
+ WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
+ }
+
+ /* set the pointer to the MQD */
+ WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
+ WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
+
+ /* set MQD vmid to 0 */
+ WREG32(mmCP_MQD_CONTROL, mqd->cp_mqd_control);
+
+ /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
+ WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
+ WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
+
+ /* set up the HQD, this is similar to CP_RB0_CNTL */
+ WREG32(mmCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
+ mqd->cp_hqd_pq_rptr_report_addr_lo);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ mqd->cp_hqd_pq_rptr_report_addr_hi);
+
+ /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo);
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, mqd->cp_hqd_pq_wptr_poll_addr_hi);
+
+ /* enable the doorbell if requested */
+ if (ring->use_doorbell) {
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_FIJI) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
+ AMDGPU_DOORBELL_KIQ << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
+ AMDGPU_DOORBELL_MEC_RING7 << 2);
+ }
+ }
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
+
+ /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+ WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
+
+ /* set the vmid for the queue */
+ WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
+
+ WREG32(mmCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);
+
+ /* activate the queue */
+ WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
+
+ if (ring->use_doorbell) {
+ tmp = RREG32(mmCP_PQ_STATUS);
+ tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
+ WREG32(mmCP_PQ_STATUS, tmp);
+ }
+
+ return 0;
+}
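Deactivating an active queue in gfx_v8_0_kiq_init_register() above is a bounded poll: write the dequeue request, then spin on the ACTIVE bit with a microsecond delay up to usec_timeout tries. A standalone model of that loop; hw_read()/hw_write_dequeue()/sleep_1us() are fake stand-ins for RREG32/WREG32/udelay, and the fake hardware settles after one iteration:

/* Standalone model of the bounded HQD deactivation poll above. */
#include <stdio.h>

static unsigned int hqd_active = 1;	/* fake CP_HQD_ACTIVE state */

static unsigned int hw_read(void)	{ return hqd_active; }
static void hw_write_dequeue(void)	{ /* request dequeue */ }
static void sleep_1us(void)		{ hqd_active = 0; /* hw settles */ }

static int hqd_deactivate(unsigned int usec_timeout)
{
	unsigned int j;

	if (!(hw_read() & 1))
		return 0;		/* already idle */

	hw_write_dequeue();
	for (j = 0; j < usec_timeout; j++) {
		if (!(hw_read() & 1))
			return 0;	/* queue drained */
		sleep_1us();
	}
	return -1;			/* timed out; caller decides */
}

int main(void)
{
	printf("deactivate: %d\n", hqd_deactivate(1000));
	return 0;
}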
+
+static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring,
+ struct vi_mqd *mqd,
+ u64 mqd_gpu_addr)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ uint64_t eop_gpu_addr;
+ bool is_kiq = false;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ is_kiq = true;
+
+ if (is_kiq) {
+ eop_gpu_addr = kiq->eop_gpu_addr;
+ gfx_v8_0_kiq_setting(&kiq->ring);
+ } else
+ eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
+ ring->queue * MEC_HPD_SIZE;
+
+ mutex_lock(&adev->srbm_mutex);
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ gfx_v8_0_mqd_init(adev, mqd, mqd_gpu_addr, eop_gpu_addr, ring);
+
+ if (is_kiq)
+ gfx_v8_0_kiq_init_register(adev, mqd, ring);
+
+ vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (is_kiq)
+ gfx_v8_0_kiq_enable(ring);
+ else
+ gfx_v8_0_map_queue_enable(&kiq->ring, ring);
+
+ return 0;
+}
+
+static void gfx_v8_0_kiq_free_queue(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = NULL;
+ int i;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
+ ring->mqd_obj = NULL;
+ }
+
+ ring = &adev->gfx.kiq.ring;
+ amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
+ ring->mqd_obj = NULL;
+}
+
+static int gfx_v8_0_kiq_setup_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ struct vi_mqd *mqd;
+ u64 mqd_gpu_addr;
+ u32 *buf;
+ int r = 0;
+
+ r = amdgpu_bo_create_kernel(adev, sizeof(struct vi_mqd), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ &mqd_gpu_addr, (void **)&buf);
+ if (r) {
+ dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+ return r;
+ }
+
+ /* init the mqd struct */
+ memset(buf, 0, sizeof(struct vi_mqd));
+ mqd = (struct vi_mqd *)buf;
+
+ r = gfx_v8_0_kiq_init_queue(ring, mqd, mqd_gpu_addr);
+ if (r)
+ return r;
+
+ amdgpu_bo_kunmap(ring->mqd_obj);
+
+ return 0;
+}
+
+static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = NULL;
+ int r, i;
+
+ ring = &adev->gfx.kiq.ring;
+ r = gfx_v8_0_kiq_setup_queue(adev, ring);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ r = gfx_v8_0_kiq_setup_queue(adev, ring);
+ if (r)
+ return r;
+ }
+
+ gfx_v8_0_cp_compute_enable(adev, true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ ring->ready = false;
+ }
+
+ ring = &adev->gfx.kiq.ring;
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ ring->ready = false;
+
+ return 0;
+}
+
static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
{
int r, i, j;
@@ -4806,7 +5287,10 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
- r = gfx_v8_0_cp_compute_resume(adev);
+ if (amdgpu_sriov_vf(adev))
+ r = gfx_v8_0_kiq_resume(adev);
+ else
+ r = gfx_v8_0_cp_compute_resume(adev);
if (r)
return r;
@@ -4845,6 +5329,7 @@ static int gfx_v8_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
if (amdgpu_sriov_vf(adev)) {
+ gfx_v8_0_kiq_free_queue(adev);
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
@@ -5360,6 +5845,18 @@ static int gfx_v8_0_set_powergating_state(void *handle,
case CHIP_CARRIZO:
case CHIP_STONEY:
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ cz_enable_sck_slow_down_on_power_up(adev, true);
+ cz_enable_sck_slow_down_on_power_down(adev, true);
+ } else {
+ cz_enable_sck_slow_down_on_power_up(adev, false);
+ cz_enable_sck_slow_down_on_power_down(adev, false);
+ }
+ if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+ cz_enable_cp_power_gating(adev, true);
+ else
+ cz_enable_cp_power_gating(adev, false);
+
cz_update_gfx_cg_power_gating(adev, enable);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
@@ -5396,6 +5893,45 @@ static int gfx_v8_0_set_powergating_state(void *handle,
return 0;
}
+static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ /* AMD_CG_SUPPORT_GFX_MGCG */
+ data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
+ if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
+ *flags |= AMD_CG_SUPPORT_GFX_MGCG;
+
+ /* AMD_CG_SUPPORT_GFX_CGLG */
+ data = RREG32(mmRLC_CGCG_CGLS_CTRL);
+ if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CGCG;
+
+ /* AMD_CG_SUPPORT_GFX_CGLS */
+ if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CGLS;
+
+ /* AMD_CG_SUPPORT_GFX_CGTS */
+ data = RREG32(mmCGTS_SM_CTRL_REG);
+ if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
+ *flags |= AMD_CG_SUPPORT_GFX_CGTS;
+
+ /* AMD_CG_SUPPORT_GFX_CGTS_LS */
+ if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
+ *flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;
+
+ /* AMD_CG_SUPPORT_GFX_RLC_LS */
+ data = RREG32(mmRLC_MEM_SLP_CNTL);
+ if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
+
+ /* AMD_CG_SUPPORT_GFX_CP_LS */
+ data = RREG32(mmCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
+}
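Each clause in gfx_v8_0_get_clockgating_state() above reads a register once and folds a feature bit into *flags when its enable mask is set (or an override mask is clear). A standalone sketch of that flag-harvesting pattern, with fake register values and mask/flag names:

/* Standalone sketch: read a register once, test masks, accumulate
 * feature flags. Names and values below are fabricated for the demo. */
#include <stdint.h>
#include <stdio.h>

#define CGCG_EN_MASK	(1u << 0)
#define CGLS_EN_MASK	(1u << 1)
#define FLAG_CGCG	(1u << 0)
#define FLAG_CGLS	(1u << 1)

static uint32_t read_cgcg_cgls_ctrl(void) { return CGCG_EN_MASK; }

int main(void)
{
	uint32_t flags = 0;
	uint32_t data = read_cgcg_cgls_ctrl();	/* one read, two flags */

	if (data & CGCG_EN_MASK)
		flags |= FLAG_CGCG;
	if (data & CGLS_EN_MASK)
		flags |= FLAG_CGLS;

	printf("cg flags = 0x%x\n", flags);	/* 0x1 */
	return 0;
}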
+
static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
uint32_t reg_addr, uint32_t cmd)
{
@@ -5444,68 +5980,6 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
-static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
-{
- u32 data = 0;
- unsigned i;
-
- data = RREG32(mmRLC_CNTL);
- if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0)
- return;
-
- if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) ||
- (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG))) {
- data |= RLC_GPR_REG2__REQ_MASK;
- data &= ~RLC_GPR_REG2__MESSAGE_MASK;
- data |= (MSG_ENTER_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT);
- WREG32(mmRLC_GPR_REG2, data);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPM_STAT) &
- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
- RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
- RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
- break;
- udelay(1);
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
- break;
- udelay(1);
- }
- adev->gfx.rlc.in_safe_mode = true;
- }
-}
-
-static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev)
-{
- u32 data;
- unsigned i;
-
- data = RREG32(mmRLC_CNTL);
- if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0)
- return;
-
- if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) ||
- (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG))) {
- data |= RLC_GPR_REG2__REQ_MASK;
- data &= ~RLC_GPR_REG2__MESSAGE_MASK;
- data |= (MSG_EXIT_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT);
- WREG32(mmRLC_GPR_REG2, data);
- adev->gfx.rlc.in_safe_mode = false;
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
- break;
- udelay(1);
- }
-}
-
static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 data;
@@ -5565,31 +6039,11 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v8_0_nop_enter_rlc_safe_mode(struct amdgpu_device *adev)
-{
- adev->gfx.rlc.in_safe_mode = true;
-}
-
-static void gfx_v8_0_nop_exit_rlc_safe_mode(struct amdgpu_device *adev)
-{
- adev->gfx.rlc.in_safe_mode = false;
-}
-
-static const struct amdgpu_rlc_funcs cz_rlc_funcs = {
- .enter_safe_mode = cz_enter_rlc_safe_mode,
- .exit_safe_mode = cz_exit_rlc_safe_mode
-};
-
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.enter_safe_mode = iceland_enter_rlc_safe_mode,
.exit_safe_mode = iceland_exit_rlc_safe_mode
};
-static const struct amdgpu_rlc_funcs gfx_v8_0_nop_rlc_funcs = {
- .enter_safe_mode = gfx_v8_0_nop_enter_rlc_safe_mode,
- .exit_safe_mode = gfx_v8_0_nop_exit_rlc_safe_mode
-};
-
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -6011,7 +6465,8 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
+ (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6224,6 +6679,31 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, upper_32_bits(seq));
}
+static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned int flags)
+{
+ /* we only allocate 32 bits for each seq wb address */
+ BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ /* write fence seq to the "addr" */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+
+ if (flags & AMDGPU_FENCE_FLAG_INT) {
+ /* set register to trigger INT */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
+ amdgpu_ring_write(ring, mmCPC_INT_STATUS);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
+ }
+}
+
static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -6234,6 +6714,10 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
+ if (amdgpu_sriov_vf(ring->adev))
+ gfx_v8_0_ring_emit_ce_meta_init(ring,
+ (flags & AMDGPU_VM_DOMAIN) ? AMDGPU_CSA_VADDR : ring->adev->virt.csa_vmid0_addr);
+
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
gfx_v8_0_ring_emit_vgt_flush(ring);
@@ -6258,6 +6742,36 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, dw2);
amdgpu_ring_write(ring, 0);
+
+ if (amdgpu_sriov_vf(ring->adev))
+ gfx_v8_0_ring_emit_de_meta_init(ring,
+ (flags & AMDGPU_VM_DOMAIN) ? AMDGPU_CSA_VADDR : ring->adev->virt.csa_vmid0_addr);
+}
+
+static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+ amdgpu_ring_write(ring, 0 | /* src: register*/
+ (5 << 8) | /* dst: memory */
+ (1 << 20)); /* write confirm */
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+ adev->virt.reg_val_offs * 4));
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+ adev->virt.reg_val_offs * 4));
+}
+
+static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val);
}
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
@@ -6405,6 +6919,72 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t tmp, target;
+ struct amdgpu_ring *ring = (struct amdgpu_ring *)src->data;
+
+ BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ));
+
+ if (ring->me == 1)
+ target = mmCP_ME1_PIPE0_INT_CNTL;
+ else
+ target = mmCP_ME2_PIPE0_INT_CNTL;
+ target += ring->pipe;
+
+ switch (type) {
+ case AMDGPU_CP_KIQ_IRQ_DRIVER0:
+ if (state == AMDGPU_IRQ_STATE_DISABLE) {
+ tmp = RREG32(mmCPC_INT_CNTL);
+ tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
+ GENERIC2_INT_ENABLE, 0);
+ WREG32(mmCPC_INT_CNTL, tmp);
+
+ tmp = RREG32(target);
+ tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
+ GENERIC2_INT_ENABLE, 0);
+ WREG32(target, tmp);
+ } else {
+ tmp = RREG32(mmCPC_INT_CNTL);
+ tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
+ GENERIC2_INT_ENABLE, 1);
+ WREG32(mmCPC_INT_CNTL, tmp);
+
+ tmp = RREG32(target);
+ tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
+ GENERIC2_INT_ENABLE, 1);
+ WREG32(target, tmp);
+ }
+ break;
+ default:
+ BUG(); /* kiq only supports GENERIC2_INT for now */
+ break;
+ }
+ return 0;
+}
+
+static int gfx_v8_0_kiq_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring = (struct amdgpu_ring *)source->data;
+
+ BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ));
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+ DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
+ me_id, pipe_id, queue_id);
+
+ amdgpu_fence_process(ring);
+ return 0;
+}
+
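The handler above unpacks the interrupt source from bit fields of entry->ring_id before kicking fence processing. A minimal standalone model of that decode (the sample ring_id is arbitrary):

/* Model of the ring_id decode in gfx_v8_0_kiq_irq() above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t ring_id = 0x16;                 /* sample value */
        uint8_t me    = (ring_id & 0x0c) >> 2;
        uint8_t pipe  = (ring_id & 0x03) >> 0;
        uint8_t queue = (ring_id & 0x70) >> 4;

        printf("me:%u pipe:%u queue:%u\n", me, pipe, queue);
        return 0;
}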
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6423,6 +7003,7 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.post_soft_reset = gfx_v8_0_post_soft_reset,
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
+ .get_clockgating_state = gfx_v8_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
@@ -6440,7 +7021,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
2 + /* gfx_v8_ring_emit_sb */
- 3 + 4, /* gfx_v8_ring_emit_cntxcntl including vgt flush */
+ 3 + 4 + 29, /* gfx_v8_ring_emit_cntxcntl including vgt flush/meta-data */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
@@ -6485,10 +7066,39 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.pad_ib = amdgpu_ring_generic_pad_ib,
};
+static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
+ .type = AMDGPU_RING_TYPE_KIQ,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .get_rptr = gfx_v8_0_ring_get_rptr,
+ .get_wptr = gfx_v8_0_ring_get_wptr_compute,
+ .set_wptr = gfx_v8_0_ring_set_wptr_compute,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
+ .emit_ib = gfx_v8_0_ring_emit_ib_compute,
+ .emit_fence = gfx_v8_0_ring_emit_fence_kiq,
+ .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
+ .test_ring = gfx_v8_0_ring_test_ring,
+ .test_ib = gfx_v8_0_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_rreg = gfx_v8_0_ring_emit_rreg,
+ .emit_wreg = gfx_v8_0_ring_emit_wreg,
+};
+
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
+ adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
+
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
@@ -6511,6 +7121,11 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
.process = gfx_v8_0_priv_inst_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
+ .set = gfx_v8_0_kiq_set_interrupt_state,
+ .process = gfx_v8_0_kiq_irq,
+};
+
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -6521,22 +7136,14 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
+
+ adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+ adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
}
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->gfx.rlc.funcs = &iceland_rlc_funcs;
- break;
- case CHIP_STONEY:
- case CHIP_CARRIZO:
- adev->gfx.rlc.funcs = &cz_rlc_funcs;
- break;
- default:
- adev->gfx.rlc.funcs = &gfx_v8_0_nop_rlc_funcs;
- break;
- }
+ adev->gfx.rlc.funcs = &iceland_rlc_funcs;
}
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
@@ -6653,3 +7260,62 @@ const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
.rev = 0,
.funcs = &gfx_v8_0_ip_funcs,
};
+
+static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t csa_addr)
+{
+ uint64_t ce_payload_addr;
+ int cnt_ce;
+ static union {
+ struct amdgpu_ce_ib_state regular;
+ struct amdgpu_ce_ib_state_chained_ib chained;
+ } ce_payload = {};
+
+ if (ring->adev->virt.chained_ib_support) {
+ ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload);
+ cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
+ } else {
+ ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data, ce_payload);
+ cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
+ WRITE_DATA_DST_SEL(8) |
+ WR_CONFIRM) |
+ WRITE_DATA_CACHE_POLICY(0));
+ amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
+ amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
+}
+
+static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t csa_addr)
+{
+ uint64_t de_payload_addr, gds_addr;
+ int cnt_de;
+ static union {
+ struct amdgpu_de_ib_state regular;
+ struct amdgpu_de_ib_state_chained_ib chained;
+ } de_payload = {};
+
+ gds_addr = csa_addr + 4096;
+ if (ring->adev->virt.chained_ib_support) {
+ de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
+ de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
+ de_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, de_payload);
+ cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
+ } else {
+ de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
+ de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
+ de_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data, de_payload);
+ cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(8) |
+ WR_CONFIRM) |
+ WRITE_DATA_CACHE_POLICY(0));
+ amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
+ amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
+ amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 273b16fb9459..8d05e0c4e3d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -375,9 +375,16 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
/* size in MB */
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->mc.visible_vram_size = adev->mc.aper_size;
+
+#ifdef CONFIG_X86_64
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+ adev->mc.aper_size = adev->mc.real_vram_size;
+ }
+#endif
/* In case the PCI BAR is larger than the actual amount of vram */
+ adev->mc.visible_vram_size = adev->mc.aper_size;
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 476bc9f1954b..7669b3259f35 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -467,9 +467,16 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
/* size in MB */
adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->mc.visible_vram_size = adev->mc.aper_size;
+
+#ifdef CONFIG_X86_64
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+ adev->mc.aper_size = adev->mc.real_vram_size;
+ }
+#endif
/* In case the PCI BAR is larger than the actual amount of vram */
+ adev->mc.visible_vram_size = adev->mc.aper_size;
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
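
Both the gmc_v7_0 and gmc_v8_0 hunks make the same fix: visible_vram_size must be derived from the possibly rewritten aperture, so the assignment moves below the APU branch that points the aperture at the carve-out (the shift by 22 suggests MC_VM_FB_OFFSET is in 4 MB granules). A standalone model of the resulting logic, with made-up sample values:

/* Model of the mc_init() aperture logic above; numbers are illustrative. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mb = 1024ull * 1024ull;
        uint64_t real_vram_size = 512 * mb;     /* pretend mmCONFIG_MEMSIZE */
        uint64_t fb_offset = 0x300;             /* pretend mmMC_VM_FB_OFFSET */
        int is_apu = 1;

        uint64_t aper_base = 0xe0000000ull;     /* PCI BAR default */
        uint64_t aper_size = 256 * mb;

        if (is_apu) {                           /* CONFIG_X86_64 APU path */
                aper_base = fb_offset << 22;    /* 4 MB units -> bytes */
                aper_size = real_vram_size;
        }

        uint64_t visible = aper_size;
        if (visible > real_vram_size)           /* BAR larger than VRAM */
                visible = real_vram_size;

        printf("aper_base=0x%llx visible=%llu MB\n",
               (unsigned long long)aper_base,
               (unsigned long long)(visible / mb));
        return 0;
}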
@@ -1439,6 +1446,21 @@ static int gmc_v8_0_set_powergating_state(void *handle,
return 0;
}
+static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ data = RREG32(mmMC_HUB_MISC_HUB_CG);
+ if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
+}
+
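The new get_clockgating_state callback reports which clockgating features are actually live by sampling the hardware rather than trusting cached state. A standalone model of the decode, with stand-in masks and flag values:

/* Model of the gmc_v8_0_get_clockgating_state() decode above. */
#include <stdio.h>
#include <stdint.h>

#define CG_ENABLE_MASK  (1u << 0)   /* stands in for ..._HUB_CG__ENABLE_MASK */
#define CG_MEM_LS_MASK  (1u << 1)   /* stands in for ...__MEM_LS_ENABLE_MASK */
#define FLAG_MC_MGCG    (1u << 0)   /* illustrative flag values */
#define FLAG_MC_LS      (1u << 1)

int main(void)
{
        uint32_t hub_cg = CG_ENABLE_MASK;   /* pretend RREG32(mmMC_HUB_MISC_HUB_CG) */
        uint32_t flags = 0;

        if (hub_cg & CG_ENABLE_MASK)
                flags |= FLAG_MC_MGCG;
        if (hub_cg & CG_MEM_LS_MASK)
                flags |= FLAG_MC_LS;

        printf("cg flags: 0x%x\n", flags);
        return 0;
}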
static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
@@ -1457,6 +1479,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.post_soft_reset = gmc_v8_0_post_soft_reset,
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
+ .get_clockgating_state = gmc_v8_0_get_clockgating_state,
};
static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 5a1bc358bcb1..f5a343cb0010 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1230,6 +1230,7 @@ static void kv_update_current_ps(struct amdgpu_device *adev,
pi->current_rps = *rps;
pi->current_ps = *new_ps;
pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
static void kv_update_requested_ps(struct amdgpu_device *adev,
@@ -1241,6 +1242,7 @@ static void kv_update_requested_ps(struct amdgpu_device *adev,
pi->requested_rps = *rps;
pi->requested_ps = *new_ps;
pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
@@ -1548,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
kv_dpm_powergate_vce(adev, false);
- /* turn the clocks on when encoding */
- ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- if (ret)
- return ret;
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
@@ -1571,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_VCEDPM_SetEnabledMask,
(1 << pi->vce_boot_level));
-
kv_enable_vce_dpm(adev, true);
} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
kv_enable_vce_dpm(adev, false);
- /* turn the clocks off when not encoding */
- ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
- if (ret)
- return ret;
kv_dpm_powergate_vce(adev, true);
}
@@ -1686,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
- if (pi->uvd_power_gated == gate)
- return;
-
pi->uvd_power_gated = gate;
if (gate) {
- if (pi->caps_uvd_pg) {
- /* disable clockgating so we can properly shut down the block */
- ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- /* shutdown the UVD block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- /* XXX: check for errors */
- }
+ /* stop the UVD block */
+ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
kv_update_uvd_dpm(adev, gate);
if (pi->caps_uvd_pg)
/* power off the UVD block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
} else {
- if (pi->caps_uvd_pg) {
+ if (pi->caps_uvd_pg)
/* power on the UVD block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
/* re-init the UVD block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
- ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
- /* XXX: check for errors */
- }
kv_update_uvd_dpm(adev, gate);
+
+ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
if (pi->vce_power_gated == gate)
return;
pi->vce_power_gated = gate;
- if (gate) {
- if (pi->caps_vce_pg) {
- /* shutdown the VCE block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- /* XXX: check for errors */
- /* power off the VCE block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
- }
- } else {
- if (pi->caps_vce_pg) {
- /* power on the VCE block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
- /* re-init the VCE block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- /* XXX: check for errors */
- }
- }
+ if (!pi->caps_vce_pg)
+ return;
+
+ if (gate)
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+ else
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
}
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
@@ -1904,19 +1869,19 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
}
static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
- enum amdgpu_dpm_forced_level level)
+ enum amd_dpm_forced_level level)
{
int ret;
- if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+ if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
ret = kv_force_dpm_highest(adev);
if (ret)
return ret;
- } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+ } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
ret = kv_force_dpm_lowest(adev);
if (ret)
return ret;
- } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+ } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
ret = kv_unforce_levels(adev);
if (ret)
return ret;
@@ -3007,8 +2972,6 @@ static int kv_dpm_late_init(void *handle)
kv_dpm_powergate_acp(adev, true);
kv_dpm_powergate_samu(adev, true);
- kv_dpm_powergate_vce(adev, true);
- kv_dpm_powergate_uvd(adev, true);
return 0;
}
@@ -3029,7 +2992,7 @@ static int kv_dpm_sw_init(void *handle)
/* default to balanced state */
adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
adev->pm.default_sclk = adev->clock.default_sclk;
adev->pm.default_mclk = adev->clock.default_mclk;
adev->pm.current_sclk = adev->clock.default_sclk;
@@ -3078,6 +3041,9 @@ static int kv_dpm_hw_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!amdgpu_dpm)
+ return 0;
+
mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
@@ -3245,15 +3211,52 @@ static int kv_dpm_set_powergating_state(void *handle,
return 0;
}
+static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
+ const struct kv_pl *kv_cpl2)
+{
+ return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
+ (kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
+ (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
+ (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
+}
+
static int kv_check_state_equal(struct amdgpu_device *adev,
struct amdgpu_ps *cps,
struct amdgpu_ps *rps,
bool *equal)
{
- if (equal == NULL)
+ struct kv_ps *kv_cps;
+ struct kv_ps *kv_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
- *equal = false;
+ kv_cps = kv_get_ps(cps);
+ kv_rps = kv_get_ps(rps);
+
+ if (kv_cps == NULL) {
+ *equal = false;
+ return 0;
+ }
+
+ if (kv_cps->num_levels != kv_rps->num_levels) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < kv_cps->num_levels; i++) {
+ if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
+ &(kv_rps->levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
return 0;
}
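
The rewritten comparison walks every performance level field by field and only then falls back to the UVD/VCE clocks as a tie-breaker. A standalone model of that logic, with the structs trimmed to the compared fields:

/* Model of the power-state comparison in kv_check_state_equal() above. */
#include <stdbool.h>
#include <stdio.h>

struct pl { unsigned sclk, vddc_index, ds_divider_index, force_nbp_state; };
struct ps {
        int num_levels;
        struct pl levels[4];
        unsigned vclk, dclk, evclk, ecclk;
};

static bool levels_equal(const struct pl *a, const struct pl *b)
{
        return a->sclk == b->sclk &&
               a->vddc_index == b->vddc_index &&
               a->ds_divider_index == b->ds_divider_index &&
               a->force_nbp_state == b->force_nbp_state;
}

static bool states_equal(const struct ps *c, const struct ps *r)
{
        int i;

        if (c->num_levels != r->num_levels)
                return false;
        for (i = 0; i < c->num_levels; i++)
                if (!levels_equal(&c->levels[i], &r->levels[i]))
                        return false;
        /* all levels match: use the UVD/VCE clocks to break the tie */
        return c->vclk == r->vclk && c->dclk == r->dclk &&
               c->evclk == r->evclk && c->ecclk == r->ecclk;
}

int main(void)
{
        struct ps a = { 1, { { 300, 1, 0, 0 } }, 0, 0, 0, 0 };
        struct ps b = a;

        printf("equal: %d\n", states_equal(&a, &b));     /* 1 */
        b.levels[0].sclk = 600;
        printf("equal: %d\n", states_equal(&a, &b));     /* 0 */
        return 0;
}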
@@ -3307,12 +3310,3 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
-
-const struct amdgpu_ip_block_version kv_dpm_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &kv_dpm_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
new file mode 100644
index 000000000000..d2622b6f49fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -0,0 +1,592 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Xiangliang.Yu@amd.com
+ */
+
+#include "amdgpu.h"
+#include "vi.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "vid.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gmc_v8_0.h"
+#include "gfx_v8_0.h"
+#include "sdma_v3_0.h"
+#include "tonga_ih.h"
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "smu/smu_7_1_3_d.h"
+#include "mxgpu_vi.h"
+
+/* VI golden setting */
+static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
+ mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
+ mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
+ mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+ mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+ mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+ mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+ mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
+ mmPCIE_INDEX, 0xffffffff, 0x0140001c,
+ mmPCIE_DATA, 0x000f0000, 0x00000000,
+ mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
+ mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
+ mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+ mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
+ mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
+ mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+ mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+ mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
+};
+
+static const u32 xgpu_fiji_golden_settings_a10[] = {
+ mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_MISC, 0x1f311fff, 0x12300000,
+ mmHDMI_CONTROL, 0x31000111, 0x00000011,
+ mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
+ mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+ mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+};
+
+static const u32 xgpu_fiji_golden_common_all[] = {
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
+ mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
+ mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
+};
+
+static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
+ mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
+ mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
+ mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+ mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+ mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+ mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+ mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
+ mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+ mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+ mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+ mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+ mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
+ mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+ mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+ mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+ mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+ mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
+ mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+ mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+ mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+ mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+ mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
+ mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+ mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+ mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+ mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+ mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
+ mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+ mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+ mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+ mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+ mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
+ mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+ mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+ mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+ mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+ mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
+ mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+ mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+ mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
+ mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
+ mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
+ mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
+ mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+ mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+ mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
+ mmPCIE_INDEX, 0xffffffff, 0x0140001c,
+ mmPCIE_DATA, 0x000f0000, 0x00000000,
+ mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
+ mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
+ mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+ mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
+ mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
+ mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+ mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+ mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
+};
+
+static const u32 xgpu_tonga_golden_settings_a11[] = {
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
+ mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_MISC, 0x1f311fff, 0x12300000,
+ mmGB_GPU_ID, 0x0000000f, 0x00000000,
+ mmHDMI_CONTROL, 0x31000111, 0x00000011,
+ mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
+ mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
+ mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
+ mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+ mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
+ mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
+ mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
+ mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
+ mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+ mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+};
+
+static const u32 xgpu_tonga_golden_common_all[] = {
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
+ mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
+ mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+};
+
+void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ amdgpu_program_register_sequence(adev,
+ xgpu_fiji_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(
+ xgpu_fiji_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ xgpu_fiji_golden_settings_a10,
+ (const u32)ARRAY_SIZE(
+ xgpu_fiji_golden_settings_a10));
+ amdgpu_program_register_sequence(adev,
+ xgpu_fiji_golden_common_all,
+ (const u32)ARRAY_SIZE(
+ xgpu_fiji_golden_common_all));
+ break;
+ case CHIP_TONGA:
+ amdgpu_program_register_sequence(adev,
+ xgpu_tonga_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(
+ xgpu_tonga_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ xgpu_tonga_golden_settings_a11,
+ (const u32)ARRAY_SIZE(
+ xgpu_tonga_golden_settings_a11));
+ amdgpu_program_register_sequence(adev,
+ xgpu_tonga_golden_common_all,
+ (const u32)ARRAY_SIZE(
+ xgpu_tonga_golden_common_all));
+ break;
+ default:
+ BUG_ON("Doesn't support chip type.\n");
+ break;
+ }
+}
+
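The tables above follow the driver's usual golden-register convention: consecutive (register, and-mask, or-value) triplets handed to amdgpu_program_register_sequence(). That helper is not part of this patch, so the read-modify-write below is an assumption about its behaviour, modelled standalone:

/*
 * Model of applying (reg, and_mask, or_mask) triplets.  Assumed
 * semantics: a full mask writes the value outright, anything else
 * is a masked read-modify-write.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t regs[0x10000];                  /* fake register file */

static void program_sequence(const uint32_t *t, unsigned count)
{
        unsigned i;

        for (i = 0; i + 2 < count; i += 3) {
                uint32_t reg = t[i], and_mask = t[i + 1], or_mask = t[i + 2];
                uint32_t tmp;

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = regs[reg];
                        tmp &= ~and_mask;
                        tmp |= or_mask;
                }
                regs[reg] = tmp;
        }
}

int main(void)
{
        /* two sample triplets in the same format as the tables above */
        static const uint32_t seq[] = {
                0x1234, 0xffffffff, 0x00000100,  /* full overwrite */
                0x1235, 0x0000000f, 0x00000009,  /* masked update  */
        };

        regs[0x1235] = 0xabcdef00;
        program_sequence(seq, 6);
        printf("0x1234 = 0x%08x\n", (unsigned)regs[0x1234]);
        printf("0x1235 = 0x%08x\n", (unsigned)regs[0x1235]);
        return 0;
}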
+/*
+ * Mailbox communication between GPU hypervisor and VFs
+ */
+static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
+{
+ u32 reg;
+
+ reg = RREG32(mmMAILBOX_CONTROL);
+ reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
+ WREG32(mmMAILBOX_CONTROL, reg);
+}
+
+static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
+{
+ u32 reg;
+
+ reg = RREG32(mmMAILBOX_CONTROL);
+ reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
+ TRN_MSG_VALID, val ? 1 : 0);
+ WREG32(mmMAILBOX_CONTROL, reg);
+}
+
+static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
+ enum idh_event event)
+{
+ u32 reg;
+
+ reg = RREG32(mmMAILBOX_MSGBUF_TRN_DW0);
+ reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
+ MSGBUF_DATA, event);
+ WREG32(mmMAILBOX_MSGBUF_TRN_DW0, reg);
+
+ xgpu_vi_mailbox_set_valid(adev, true);
+}
+
+static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
+ enum idh_event event)
+{
+ u32 reg;
+
+ reg = RREG32(mmMAILBOX_MSGBUF_RCV_DW0);
+ if (reg != event)
+ return -ENOENT;
+
+ /* send ack to PF */
+ xgpu_vi_mailbox_send_ack(adev);
+
+ return 0;
+}
+
+static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
+{
+ int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
+ u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
+ u32 reg;
+
+ reg = RREG32(mmMAILBOX_CONTROL);
+ while (!(reg & mask)) {
+ if (timeout <= 0) {
+ pr_err("Doesn't get ack from pf.\n");
+ r = -ETIME;
+ break;
+ }
+ msleep(1);
+ timeout -= 1;
+
+ reg = RREG32(mmMAILBOX_CONTROL);
+ }
+
+ return r;
+}
+
+static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+{
+ int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
+
+ r = xgpu_vi_mailbox_rcv_msg(adev, event);
+ while (r) {
+ if (timeout <= 0) {
+ pr_err("Doesn't get ack from pf.\n");
+ r = -ETIME;
+ break;
+ }
+ msleep(1);
+ timeout -= 1;
+
+ r = xgpu_vi_mailbox_rcv_msg(adev, event);
+ }
+
+ return r;
+}
+
+static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request request)
+{
+ int r;
+
+ xgpu_vi_mailbox_trans_msg(adev, request);
+
+ /* start to poll ack */
+ r = xgpu_vi_poll_ack(adev);
+ if (r)
+ return r;
+
+ xgpu_vi_mailbox_set_valid(adev, false);
+
+ /* for IDH_REQ_GPU_INIT_ACCESS, also wait for the ready message */
+ if (request == IDH_REQ_GPU_INIT_ACCESS) {
+ r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
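xgpu_vi_send_access_requests() above is the VF side of a three-step handshake: write the request and raise TRN_MSG_VALID, poll for TRN_MSG_ACK, then drop the valid bit; init-access requests additionally wait for IDH_READY_TO_ACCESS_GPU. A standalone model with the hypervisor reduced to a stub that responds immediately (on real hardware the loops sleep 1 ms per iteration up to VI_MAILBOX_TIMEDOUT):

/* Model of the VF->PF handshake in xgpu_vi_send_access_requests(). */
#include <stdio.h>

static int trn_msg_valid, trn_msg_ack, rcv_msg;

#define IDH_REQ_GPU_INIT_ACCESS  1
#define IDH_READY_TO_ACCESS_GPU  1

static void hypervisor_stub(int request)
{
        trn_msg_ack = 1;                        /* PF acks the doorbell */
        if (request == IDH_REQ_GPU_INIT_ACCESS)
                rcv_msg = IDH_READY_TO_ACCESS_GPU;
}

static int send_access_request(int request)
{
        int timeout;

        trn_msg_valid = 1;                      /* trans_msg + set_valid */
        hypervisor_stub(request);

        for (timeout = 150; !trn_msg_ack; timeout--)    /* poll_ack */
                if (timeout <= 0)
                        return -1;
        trn_msg_valid = 0;                      /* drop TRN_MSG_VALID */

        if (request == IDH_REQ_GPU_INIT_ACCESS) {       /* poll_msg */
                for (timeout = 150; rcv_msg != IDH_READY_TO_ACCESS_GPU; timeout--)
                        if (timeout <= 0)
                                return -1;
        }
        return 0;
}

int main(void)
{
        printf("init access: %s\n",
               send_access_request(IDH_REQ_GPU_INIT_ACCESS) ? "failed" : "ok");
        return 0;
}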
+static int xgpu_vi_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
+static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_event event;
+
+ event = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
+ return xgpu_vi_send_access_requests(adev, event);
+}
+
+static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_event event;
+ int r = 0;
+
+ event = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
+ r = xgpu_vi_send_access_requests(adev, event);
+
+ return r;
+}
+
+/* mailbox interrupt support */
+static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32(mmMAILBOX_INT_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32(mmMAILBOX_INT_CNTL, tmp);
+
+ return 0;
+}
+
+static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work,
+ struct amdgpu_virt, flr_work.work);
+ struct amdgpu_device *adev = container_of(virt,
+ struct amdgpu_device, virt);
+ int r = 0;
+
+ r = xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
+ if (r)
+ DRM_ERROR("failed to get flr cmpl msg from hypervior.\n");
+
+ /* TODO: need to restore gfx states */
+}
+
+static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32(mmMAILBOX_INT_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32(mmMAILBOX_INT_CNTL, tmp);
+
+ return 0;
+}
+
+static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int r;
+
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+ /* ignore other messages */
+ if (r)
+ return 0;
+
+ /* TODO: need to save gfx states */
+ schedule_delayed_work(&adev->virt.flr_work,
+ msecs_to_jiffies(VI_MAILBOX_RESET_TIME));
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
+ .set = xgpu_vi_set_mailbox_ack_irq,
+ .process = xgpu_vi_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
+ .set = xgpu_vi_set_mailbox_rcv_irq,
+ .process = xgpu_vi_mailbox_rcv_irq,
+};
+
+void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_DELAYED_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ cancel_delayed_work_sync(&adev->virt.flr_work);
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
+const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
+ .req_full_gpu = xgpu_vi_request_full_gpu_access,
+ .rel_full_gpu = xgpu_vi_release_full_gpu_access,
+ .reset_gpu = xgpu_vi_request_reset,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
new file mode 100644
index 000000000000..fd6216efd2b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __MXGPU_VI_H__
+#define __MXGPU_VI_H__
+
+#define VI_MAILBOX_TIMEDOUT 150
+#define VI_MAILBOX_RESET_TIME 12
+
+/* VI mailbox messages request */
+enum idh_request {
+ IDH_REQ_GPU_INIT_ACCESS = 1,
+ IDH_REL_GPU_INIT_ACCESS,
+ IDH_REQ_GPU_FINI_ACCESS,
+ IDH_REL_GPU_FINI_ACCESS,
+ IDH_REQ_GPU_RESET_ACCESS
+};
+
+/* VI mailbox messages data */
+enum idh_event {
+ IDH_CLR_MSG_BUF = 0,
+ IDH_READY_TO_ACCESS_GPU,
+ IDH_FLR_NOTIFICATION,
+ IDH_FLR_NOTIFICATION_CMPL,
+ IDH_EVENT_MAX
+};
+
+extern const struct amdgpu_virt_ops xgpu_vi_virt_ops;
+
+void xgpu_vi_init_golden_registers(struct amdgpu_device *adev);
+void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index fbe74a33899c..896be64b7013 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -701,7 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 034ace79ed49..31375bdde6f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -910,7 +910,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
@@ -1533,6 +1533,22 @@ static int sdma_v3_0_set_powergating_state(void *handle,
return 0;
}
+static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ /* AMD_CG_SUPPORT_SDMA_MGCG */
+ data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
+ if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
+ *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
+
+ /* AMD_CG_SUPPORT_SDMA_LS */
+ data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
+ if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
+ *flags |= AMD_CG_SUPPORT_SDMA_LS;
+}
+
static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
@@ -1551,6 +1567,7 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.soft_reset = sdma_v3_0_soft_reset,
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
+ .get_clockgating_state = sdma_v3_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index c46b0159007d..b71e3faa40db 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -32,7 +32,7 @@
#include "amdgpu_vce.h"
#include "atom.h"
#include "amdgpu_powerplay.h"
-#include "si/sid.h"
+#include "sid.h"
#include "si_ih.h"
#include "gfx_v6_0.h"
#include "gmc_v6_0.h"
@@ -40,337 +40,343 @@
#include "dce_v6_0.h"
#include "si.h"
#include "dce_virtual.h"
+#include "gca/gfx_6_0_d.h"
+#include "oss/oss_1_0_d.h"
+#include "gmc/gmc_6_0_d.h"
+#include "dce/dce_6_0_d.h"
+#include "uvd/uvd_4_0_d.h"
static const u32 tahiti_golden_registers[] =
{
- 0x17bc, 0x00000030, 0x00000011,
- 0x2684, 0x00010000, 0x00018208,
- 0x260c, 0xffffffff, 0x00000000,
- 0x260d, 0xf00fffff, 0x00000400,
- 0x260e, 0x0002021c, 0x00020200,
- 0x031e, 0x00000080, 0x00000000,
+ mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
+ mmCB_HW_CONTROL, 0x00010000, 0x00018208,
+ mmDB_DEBUG, 0xffffffff, 0x00000000,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmDB_DEBUG3, 0x0002021c, 0x00020200,
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
0x340c, 0x000000c0, 0x00800040,
0x360c, 0x000000c0, 0x00800040,
- 0x16ec, 0x000000f0, 0x00000070,
- 0x16f0, 0x00200000, 0x50100000,
- 0x1c0c, 0x31000311, 0x00000011,
- 0x09df, 0x00000003, 0x000007ff,
- 0x0903, 0x000007ff, 0x00000000,
- 0x2285, 0xf000001f, 0x00000007,
- 0x22c9, 0xffffffff, 0x00ffffff,
- 0x22c4, 0x0000ff0f, 0x00000000,
- 0xa293, 0x07ffffff, 0x4e000000,
- 0xa0d4, 0x3f3f3fff, 0x2a00126a,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_MISC, 0x00200000, 0x50100000,
+ mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
+ mmMC_ARB_WTM_CNTL_RD, 0x00000003, 0x000007ff,
+ mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
+ mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
+ mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x2a00126a,
0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
- 0x2440, 0x07ffffff, 0x03000000,
- 0x23a2, 0x01ff1f3f, 0x00000000,
- 0x23a1, 0x01ff1f3f, 0x00000000,
- 0x2418, 0x0000007f, 0x00000020,
- 0x2542, 0x00010000, 0x00010000,
- 0x2b05, 0x00000200, 0x000002fb,
- 0x2b04, 0xffffffff, 0x0000543b,
- 0x2b03, 0xffffffff, 0xa9210876,
- 0x2234, 0xffffffff, 0x000fff40,
- 0x2235, 0x0000001f, 0x00000010,
- 0x0504, 0x20000000, 0x20fffed8,
- 0x0570, 0x000c0fc0, 0x000c0400,
- 0x052c, 0x0fffffff, 0xffffffff,
- 0x052d, 0x0fffffff, 0x0fffffff,
- 0x052e, 0x0fffffff, 0x0fffffff,
- 0x052f, 0x0fffffff, 0x0fffffff
+ mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
+ mmSQ_DED_CNT, 0x01ff1f3f, 0x00000000,
+ mmSQ_SEC_CNT, 0x01ff1f3f, 0x00000000,
+ mmSX_DEBUG_1, 0x0000007f, 0x00000020,
+ mmTA_CNTL_AUX, 0x00010000, 0x00010000,
+ mmTCP_ADDR_CONFIG, 0x00000200, 0x000002fb,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
+ mmVGT_FIFO_DEPTHS, 0xffffffff, 0x000fff40,
+ mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
+ mmVM_CONTEXT0_CNTL, 0x20000000, 0x20fffed8,
+ mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
static const u32 tahiti_golden_registers2[] =
{
- 0x0319, 0x00000001, 0x00000001
+ mmMCIF_MEM_CONTROL, 0x00000001, 0x00000001,
};
static const u32 tahiti_golden_rlc_registers[] =
{
- 0x263e, 0xffffffff, 0x12011003,
- 0x3109, 0xffffffff, 0x00601005,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
+ mmRLC_LB_PARAMS, 0xffffffff, 0x00601005,
0x311f, 0xffffffff, 0x10104040,
0x3122, 0xffffffff, 0x0100000a,
- 0x30c5, 0xffffffff, 0x00000800,
- 0x30c3, 0xffffffff, 0x800000f4,
- 0x3d2a, 0x00000008, 0x00000000
+ mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
+ mmRLC_LB_CNTL, 0xffffffff, 0x800000f4,
+ mmUVD_CGC_GATE, 0x00000008, 0x00000000,
};
static const u32 pitcairn_golden_registers[] =
{
- 0x17bc, 0x00000030, 0x00000011,
- 0x2684, 0x00010000, 0x00018208,
- 0x260c, 0xffffffff, 0x00000000,
- 0x260d, 0xf00fffff, 0x00000400,
- 0x260e, 0x0002021c, 0x00020200,
- 0x031e, 0x00000080, 0x00000000,
+ mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
+ mmCB_HW_CONTROL, 0x00010000, 0x00018208,
+ mmDB_DEBUG, 0xffffffff, 0x00000000,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmDB_DEBUG3, 0x0002021c, 0x00020200,
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
0x340c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
- 0x16ec, 0x000000f0, 0x00000070,
- 0x16f0, 0x00200000, 0x50100000,
- 0x1c0c, 0x31000311, 0x00000011,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0903, 0x000007ff, 0x00000000,
- 0x2285, 0xf000001f, 0x00000007,
- 0x22c9, 0xffffffff, 0x00ffffff,
- 0x22c4, 0x0000ff0f, 0x00000000,
- 0xa293, 0x07ffffff, 0x4e000000,
- 0xa0d4, 0x3f3f3fff, 0x2a00126a,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_MISC, 0x00200000, 0x50100000,
+ mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
+ mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
+ mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
+ mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
+ mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x2a00126a,
0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
- 0x2440, 0x07ffffff, 0x03000000,
- 0x2418, 0x0000007f, 0x00000020,
- 0x2542, 0x00010000, 0x00010000,
- 0x2b05, 0x000003ff, 0x000000f7,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b03, 0xffffffff, 0x32761054,
- 0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400,
- 0x052c, 0x0fffffff, 0xffffffff,
- 0x052d, 0x0fffffff, 0x0fffffff,
- 0x052e, 0x0fffffff, 0x0fffffff,
- 0x052f, 0x0fffffff, 0x0fffffff
+ mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
+ mmSX_DEBUG_1, 0x0000007f, 0x00000020,
+ mmTA_CNTL_AUX, 0x00010000, 0x00010000,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
+ mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
+ mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
static const u32 pitcairn_golden_rlc_registers[] =
{
- 0x263e, 0xffffffff, 0x12011003,
- 0x3109, 0xffffffff, 0x00601004,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
+ mmRLC_LB_PARAMS, 0xffffffff, 0x00601004,
0x311f, 0xffffffff, 0x10102020,
0x3122, 0xffffffff, 0x01000020,
- 0x30c5, 0xffffffff, 0x00000800,
- 0x30c3, 0xffffffff, 0x800000a4
+ mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
+ mmRLC_LB_CNTL, 0xffffffff, 0x800000a4,
};
static const u32 verde_pg_init[] =
{
- 0x0d4f, 0xffffffff, 0x40000,
- 0x0d4e, 0xffffffff, 0x200010ff,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x7007,
- 0x0d4e, 0xffffffff, 0x300010ff,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x400000,
- 0x0d4e, 0xffffffff, 0x100010ff,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x120200,
- 0x0d4e, 0xffffffff, 0x500010ff,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x1e1e16,
- 0x0d4e, 0xffffffff, 0x600010ff,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x171f1e,
- 0x0d4e, 0xffffffff, 0x700010ff,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4f, 0xffffffff, 0x0,
- 0x0d4e, 0xffffffff, 0x9ff,
- 0x0d40, 0xffffffff, 0x0,
- 0x0d41, 0xffffffff, 0x10000800,
- 0x0d41, 0xffffffff, 0xf,
- 0x0d41, 0xffffffff, 0xf,
- 0x0d40, 0xffffffff, 0x4,
- 0x0d41, 0xffffffff, 0x1000051e,
- 0x0d41, 0xffffffff, 0xffff,
- 0x0d41, 0xffffffff, 0xffff,
- 0x0d40, 0xffffffff, 0x8,
- 0x0d41, 0xffffffff, 0x80500,
- 0x0d40, 0xffffffff, 0x12,
- 0x0d41, 0xffffffff, 0x9050c,
- 0x0d40, 0xffffffff, 0x1d,
- 0x0d41, 0xffffffff, 0xb052c,
- 0x0d40, 0xffffffff, 0x2a,
- 0x0d41, 0xffffffff, 0x1053e,
- 0x0d40, 0xffffffff, 0x2d,
- 0x0d41, 0xffffffff, 0x10546,
- 0x0d40, 0xffffffff, 0x30,
- 0x0d41, 0xffffffff, 0xa054e,
- 0x0d40, 0xffffffff, 0x3c,
- 0x0d41, 0xffffffff, 0x1055f,
- 0x0d40, 0xffffffff, 0x3f,
- 0x0d41, 0xffffffff, 0x10567,
- 0x0d40, 0xffffffff, 0x42,
- 0x0d41, 0xffffffff, 0x1056f,
- 0x0d40, 0xffffffff, 0x45,
- 0x0d41, 0xffffffff, 0x10572,
- 0x0d40, 0xffffffff, 0x48,
- 0x0d41, 0xffffffff, 0x20575,
- 0x0d40, 0xffffffff, 0x4c,
- 0x0d41, 0xffffffff, 0x190801,
- 0x0d40, 0xffffffff, 0x67,
- 0x0d41, 0xffffffff, 0x1082a,
- 0x0d40, 0xffffffff, 0x6a,
- 0x0d41, 0xffffffff, 0x1b082d,
- 0x0d40, 0xffffffff, 0x87,
- 0x0d41, 0xffffffff, 0x310851,
- 0x0d40, 0xffffffff, 0xba,
- 0x0d41, 0xffffffff, 0x891,
- 0x0d40, 0xffffffff, 0xbc,
- 0x0d41, 0xffffffff, 0x893,
- 0x0d40, 0xffffffff, 0xbe,
- 0x0d41, 0xffffffff, 0x20895,
- 0x0d40, 0xffffffff, 0xc2,
- 0x0d41, 0xffffffff, 0x20899,
- 0x0d40, 0xffffffff, 0xc6,
- 0x0d41, 0xffffffff, 0x2089d,
- 0x0d40, 0xffffffff, 0xca,
- 0x0d41, 0xffffffff, 0x8a1,
- 0x0d40, 0xffffffff, 0xcc,
- 0x0d41, 0xffffffff, 0x8a3,
- 0x0d40, 0xffffffff, 0xce,
- 0x0d41, 0xffffffff, 0x308a5,
- 0x0d40, 0xffffffff, 0xd3,
- 0x0d41, 0xffffffff, 0x6d08cd,
- 0x0d40, 0xffffffff, 0x142,
- 0x0d41, 0xffffffff, 0x2000095a,
- 0x0d41, 0xffffffff, 0x1,
- 0x0d40, 0xffffffff, 0x144,
- 0x0d41, 0xffffffff, 0x301f095b,
- 0x0d40, 0xffffffff, 0x165,
- 0x0d41, 0xffffffff, 0xc094d,
- 0x0d40, 0xffffffff, 0x173,
- 0x0d41, 0xffffffff, 0xf096d,
- 0x0d40, 0xffffffff, 0x184,
- 0x0d41, 0xffffffff, 0x15097f,
- 0x0d40, 0xffffffff, 0x19b,
- 0x0d41, 0xffffffff, 0xc0998,
- 0x0d40, 0xffffffff, 0x1a9,
- 0x0d41, 0xffffffff, 0x409a7,
- 0x0d40, 0xffffffff, 0x1af,
- 0x0d41, 0xffffffff, 0xcdc,
- 0x0d40, 0xffffffff, 0x1b1,
- 0x0d41, 0xffffffff, 0x800,
- 0x0d42, 0xffffffff, 0x6c9b2000,
- 0x0d44, 0xfc00, 0x2000,
- 0x0d51, 0xffffffff, 0xfc0,
- 0x0a35, 0x00000100, 0x100
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x40000,
+ mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x200010ff,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x7007,
+ mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x300010ff,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x400000,
+ mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x100010ff,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x120200,
+ mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x500010ff,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x1e1e16,
+ mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x600010ff,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x171f1e,
+ mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x700010ff,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
+ mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x9ff,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x0,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10000800,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x4,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1000051e,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xffff,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xffff,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x8,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x80500,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x12,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x9050c,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1d,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xb052c,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x2a,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1053e,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x2d,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10546,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x30,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xa054e,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x3c,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1055f,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x3f,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10567,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x42,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1056f,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x45,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10572,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x48,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20575,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x4c,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x190801,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x67,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1082a,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x6a,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1b082d,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x87,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x310851,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xba,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x891,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xbc,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x893,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xbe,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20895,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xc2,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20899,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xc6,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x2089d,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xca,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x8a1,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xcc,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x8a3,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xce,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x308a5,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xd3,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x6d08cd,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x142,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x2000095a,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x144,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x301f095b,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x165,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xc094d,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x173,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf096d,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x184,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x15097f,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x19b,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xc0998,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1a9,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x409a7,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1af,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xcdc,
+ mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1b1,
+ mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x800,
+ mmGMCON_RENG_EXECUTE, 0xffffffff, 0x6c9b2000,
+ mmGMCON_MISC2, 0xfc00, 0x2000,
+ mmGMCON_MISC3, 0xffffffff, 0xfc0,
+ mmMC_PMG_AUTO_CFG, 0x00000100, 0x100,
};
static const u32 verde_golden_rlc_registers[] =
{
- 0x263e, 0xffffffff, 0x02010002,
- 0x3109, 0xffffffff, 0x033f1005,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x02010002,
+ mmRLC_LB_PARAMS, 0xffffffff, 0x033f1005,
0x311f, 0xffffffff, 0x10808020,
0x3122, 0xffffffff, 0x00800008,
- 0x30c5, 0xffffffff, 0x00001000,
- 0x30c3, 0xffffffff, 0x80010014
+ mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00001000,
+ mmRLC_LB_CNTL, 0xffffffff, 0x80010014,
};
static const u32 verde_golden_registers[] =
{
- 0x17bc, 0x00000030, 0x00000011,
- 0x2684, 0x00010000, 0x00018208,
- 0x260c, 0xffffffff, 0x00000000,
- 0x260d, 0xf00fffff, 0x00000400,
- 0x260e, 0x0002021c, 0x00020200,
- 0x031e, 0x00000080, 0x00000000,
+ mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
+ mmCB_HW_CONTROL, 0x00010000, 0x00018208,
+ mmDB_DEBUG, 0xffffffff, 0x00000000,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmDB_DEBUG3, 0x0002021c, 0x00020200,
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
0x340c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
- 0x16ec, 0x000000f0, 0x00000070,
- 0x16f0, 0x00200000, 0x50100000,
- 0x1c0c, 0x31000311, 0x00000011,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0903, 0x000007ff, 0x00000000,
- 0x2285, 0xf000001f, 0x00000007,
- 0x22c9, 0xffffffff, 0x00ffffff,
- 0x22c4, 0x0000ff0f, 0x00000000,
- 0xa293, 0x07ffffff, 0x4e000000,
- 0xa0d4, 0x3f3f3fff, 0x0000124a,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_MISC, 0x00200000, 0x50100000,
+ mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
+ mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
+ mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
+ mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
+ mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x0000124a,
0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
- 0x2440, 0x07ffffff, 0x03000000,
- 0x23a2, 0x01ff1f3f, 0x00000000,
- 0x23a1, 0x01ff1f3f, 0x00000000,
- 0x2418, 0x0000007f, 0x00000020,
- 0x2542, 0x00010000, 0x00010000,
- 0x2b05, 0x000003ff, 0x00000003,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b03, 0xffffffff, 0x00001032,
- 0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400,
- 0x052c, 0x0fffffff, 0xffffffff,
- 0x052d, 0x0fffffff, 0x0fffffff,
- 0x052e, 0x0fffffff, 0x0fffffff,
- 0x052f, 0x0fffffff, 0x0fffffff
+ mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
+ mmSQ_DED_CNT, 0x01ff1f3f, 0x00000000,
+ mmSQ_SEC_CNT, 0x01ff1f3f, 0x00000000,
+ mmSX_DEBUG_1, 0x0000007f, 0x00000020,
+ mmTA_CNTL_AUX, 0x00010000, 0x00010000,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x00000003,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001032,
+ mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
+ mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
static const u32 oland_golden_registers[] =
{
- 0x17bc, 0x00000030, 0x00000011,
- 0x2684, 0x00010000, 0x00018208,
- 0x260c, 0xffffffff, 0x00000000,
- 0x260d, 0xf00fffff, 0x00000400,
- 0x260e, 0x0002021c, 0x00020200,
- 0x031e, 0x00000080, 0x00000000,
+ mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
+ mmCB_HW_CONTROL, 0x00010000, 0x00018208,
+ mmDB_DEBUG, 0xffffffff, 0x00000000,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmDB_DEBUG3, 0x0002021c, 0x00020200,
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
0x340c, 0x000300c0, 0x00800040,
0x360c, 0x000300c0, 0x00800040,
- 0x16ec, 0x000000f0, 0x00000070,
- 0x16f0, 0x00200000, 0x50100000,
- 0x1c0c, 0x31000311, 0x00000011,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0903, 0x000007ff, 0x00000000,
- 0x2285, 0xf000001f, 0x00000007,
- 0x22c9, 0xffffffff, 0x00ffffff,
- 0x22c4, 0x0000ff0f, 0x00000000,
- 0xa293, 0x07ffffff, 0x4e000000,
- 0xa0d4, 0x3f3f3fff, 0x00000082,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_MISC, 0x00200000, 0x50100000,
+ mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
+ mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
+ mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
+ mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
+ mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x00000082,
0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
- 0x2440, 0x07ffffff, 0x03000000,
- 0x2418, 0x0000007f, 0x00000020,
- 0x2542, 0x00010000, 0x00010000,
- 0x2b05, 0x000003ff, 0x000000f3,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b03, 0xffffffff, 0x00003210,
- 0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400,
- 0x052c, 0x0fffffff, 0xffffffff,
- 0x052d, 0x0fffffff, 0x0fffffff,
- 0x052e, 0x0fffffff, 0x0fffffff,
- 0x052f, 0x0fffffff, 0x0fffffff
+ mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
+ mmSX_DEBUG_1, 0x0000007f, 0x00000020,
+ mmTA_CNTL_AUX, 0x00010000, 0x00010000,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+ mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
+ mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
static const u32 oland_golden_rlc_registers[] =
{
- 0x263e, 0xffffffff, 0x02010002,
- 0x3109, 0xffffffff, 0x00601005,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x02010002,
+ mmRLC_LB_PARAMS, 0xffffffff, 0x00601005,
0x311f, 0xffffffff, 0x10104040,
0x3122, 0xffffffff, 0x0100000a,
- 0x30c5, 0xffffffff, 0x00000800,
- 0x30c3, 0xffffffff, 0x800000f4
+ mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
+ mmRLC_LB_CNTL, 0xffffffff, 0x800000f4,
};
static const u32 hainan_golden_registers[] =
{
0x17bc, 0x00000030, 0x00000011,
- 0x2684, 0x00010000, 0x00018208,
- 0x260c, 0xffffffff, 0x00000000,
- 0x260d, 0xf00fffff, 0x00000400,
- 0x260e, 0x0002021c, 0x00020200,
+ mmCB_HW_CONTROL, 0x00010000, 0x00018208,
+ mmDB_DEBUG, 0xffffffff, 0x00000000,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmDB_DEBUG3, 0x0002021c, 0x00020200,
0x031e, 0x00000080, 0x00000000,
0x3430, 0xff000fff, 0x00000100,
0x340c, 0x000300c0, 0x00800040,
@@ -379,63 +385,63 @@ static const u32 hainan_golden_registers[] =
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0x00200000, 0x50100000,
0x1c0c, 0x31000311, 0x00000011,
- 0x0ab9, 0x00073ffe, 0x000022a2,
- 0x0903, 0x000007ff, 0x00000000,
- 0x2285, 0xf000001f, 0x00000007,
- 0x22c9, 0xffffffff, 0x00ffffff,
- 0x22c4, 0x0000ff0f, 0x00000000,
- 0xa293, 0x07ffffff, 0x4e000000,
- 0xa0d4, 0x3f3f3fff, 0x00000000,
+ mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
+ mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
+ mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
+ mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x00000000,
0x000c, 0xffffffff, 0x0040,
0x000d, 0x00000040, 0x00004040,
- 0x2440, 0x03e00000, 0x03600000,
- 0x2418, 0x0000007f, 0x00000020,
- 0x2542, 0x00010000, 0x00010000,
- 0x2b05, 0x000003ff, 0x000000f1,
- 0x2b04, 0xffffffff, 0x00000000,
- 0x2b03, 0xffffffff, 0x00003210,
- 0x2235, 0x0000001f, 0x00000010,
- 0x0570, 0x000c0fc0, 0x000c0400,
- 0x052c, 0x0fffffff, 0xffffffff,
- 0x052d, 0x0fffffff, 0x0fffffff,
- 0x052e, 0x0fffffff, 0x0fffffff,
- 0x052f, 0x0fffffff, 0x0fffffff
+ mmSPI_CONFIG_CNTL, 0x03e00000, 0x03600000,
+ mmSX_DEBUG_1, 0x0000007f, 0x00000020,
+ mmTA_CNTL_AUX, 0x00010000, 0x00010000,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+ mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
+ mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
static const u32 hainan_golden_registers2[] =
{
- 0x263e, 0xffffffff, 0x2011003
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x2011003,
};
static const u32 tahiti_mgcg_cgcg_init[] =
{
- 0x3100, 0xffffffff, 0xfffffffc,
- 0x200b, 0xffffffff, 0xe0000000,
- 0x2698, 0xffffffff, 0x00000100,
- 0x24a9, 0xffffffff, 0x00000100,
- 0x3059, 0xffffffff, 0x00000100,
- 0x25dd, 0xffffffff, 0x00000100,
- 0x2261, 0xffffffff, 0x06000100,
- 0x2286, 0xffffffff, 0x00000100,
- 0x24a8, 0xffffffff, 0x00000100,
- 0x30e0, 0xffffffff, 0x00000100,
- 0x22ca, 0xffffffff, 0x00000100,
- 0x2451, 0xffffffff, 0x00000100,
- 0x2362, 0xffffffff, 0x00000100,
- 0x2363, 0xffffffff, 0x00000100,
- 0x240c, 0xffffffff, 0x00000100,
- 0x240d, 0xffffffff, 0x00000100,
- 0x240e, 0xffffffff, 0x00000100,
- 0x240f, 0xffffffff, 0x00000100,
- 0x2b60, 0xffffffff, 0x00000100,
- 0x2b15, 0xffffffff, 0x00000100,
- 0x225f, 0xffffffff, 0x06000100,
- 0x261a, 0xffffffff, 0x00000100,
- 0x2544, 0xffffffff, 0x00000100,
- 0x2bc1, 0xffffffff, 0x00000100,
- 0x2b81, 0xffffffff, 0x00000100,
- 0x2527, 0xffffffff, 0x00000100,
- 0x200b, 0xffffffff, 0xe0000000,
+ mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+ mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+ mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
0x2458, 0xffffffff, 0x00010000,
0x2459, 0xffffffff, 0x00030002,
0x245a, 0xffffffff, 0x00040007,
@@ -516,55 +522,55 @@ static const u32 tahiti_mgcg_cgcg_init[] =
0x24a5, 0xffffffff, 0x00000015,
0x24a6, 0xffffffff, 0x00140013,
0x24a7, 0xffffffff, 0x00170016,
- 0x2454, 0xffffffff, 0x96940200,
- 0x21c2, 0xffffffff, 0x00900100,
- 0x311e, 0xffffffff, 0x00000080,
- 0x3101, 0xffffffff, 0x0020003f,
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
+ mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+ mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
+ mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
0x000c, 0xffffffff, 0x0000001c,
0x000d, 0x000f0000, 0x000f0000,
0x0583, 0xffffffff, 0x00000100,
- 0x0409, 0xffffffff, 0x00000100,
- 0x040b, 0x00000101, 0x00000000,
- 0x082a, 0xffffffff, 0x00000104,
- 0x0993, 0x000c0000, 0x000c0000,
- 0x0992, 0x000c0000, 0x000c0000,
- 0x1579, 0xff000fff, 0x00000100,
+ mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+ mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
+ mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
+ mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
+ mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0x0bd4, 0x00000001, 0x00000001,
- 0x0c33, 0xc0000fff, 0x00000104,
- 0x3079, 0x00000001, 0x00000001,
+ mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
+ mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
- 0x3630, 0xfffffff0, 0x00000100
+ 0x3630, 0xfffffff0, 0x00000100,
};
static const u32 pitcairn_mgcg_cgcg_init[] =
{
- 0x3100, 0xffffffff, 0xfffffffc,
- 0x200b, 0xffffffff, 0xe0000000,
- 0x2698, 0xffffffff, 0x00000100,
- 0x24a9, 0xffffffff, 0x00000100,
- 0x3059, 0xffffffff, 0x00000100,
- 0x25dd, 0xffffffff, 0x00000100,
- 0x2261, 0xffffffff, 0x06000100,
- 0x2286, 0xffffffff, 0x00000100,
- 0x24a8, 0xffffffff, 0x00000100,
- 0x30e0, 0xffffffff, 0x00000100,
- 0x22ca, 0xffffffff, 0x00000100,
- 0x2451, 0xffffffff, 0x00000100,
- 0x2362, 0xffffffff, 0x00000100,
- 0x2363, 0xffffffff, 0x00000100,
- 0x240c, 0xffffffff, 0x00000100,
- 0x240d, 0xffffffff, 0x00000100,
- 0x240e, 0xffffffff, 0x00000100,
- 0x240f, 0xffffffff, 0x00000100,
- 0x2b60, 0xffffffff, 0x00000100,
- 0x2b15, 0xffffffff, 0x00000100,
- 0x225f, 0xffffffff, 0x06000100,
- 0x261a, 0xffffffff, 0x00000100,
- 0x2544, 0xffffffff, 0x00000100,
- 0x2bc1, 0xffffffff, 0x00000100,
- 0x2b81, 0xffffffff, 0x00000100,
- 0x2527, 0xffffffff, 0x00000100,
- 0x200b, 0xffffffff, 0xe0000000,
+ mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+ mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+ mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
0x2458, 0xffffffff, 0x00010000,
0x2459, 0xffffffff, 0x00030002,
0x245a, 0xffffffff, 0x00040007,
@@ -615,53 +621,54 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
0x2496, 0xffffffff, 0x00100013,
0x2497, 0xffffffff, 0x00120011,
0x2498, 0xffffffff, 0x00150014,
- 0x2454, 0xffffffff, 0x96940200,
- 0x21c2, 0xffffffff, 0x00900100,
- 0x311e, 0xffffffff, 0x00000080,
- 0x3101, 0xffffffff, 0x0020003f,
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
+ mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+ mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
+ mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
0x000c, 0xffffffff, 0x0000001c,
0x000d, 0x000f0000, 0x000f0000,
0x0583, 0xffffffff, 0x00000100,
- 0x0409, 0xffffffff, 0x00000100,
- 0x040b, 0x00000101, 0x00000000,
- 0x082a, 0xffffffff, 0x00000104,
- 0x1579, 0xff000fff, 0x00000100,
+ mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+ mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
+ mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0x0bd4, 0x00000001, 0x00000001,
- 0x0c33, 0xc0000fff, 0x00000104,
- 0x3079, 0x00000001, 0x00000001,
+ mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
+ mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
- 0x3630, 0xfffffff0, 0x00000100
+ 0x3630, 0xfffffff0, 0x00000100,
};
+
static const u32 verde_mgcg_cgcg_init[] =
{
- 0x3100, 0xffffffff, 0xfffffffc,
- 0x200b, 0xffffffff, 0xe0000000,
- 0x2698, 0xffffffff, 0x00000100,
- 0x24a9, 0xffffffff, 0x00000100,
- 0x3059, 0xffffffff, 0x00000100,
- 0x25dd, 0xffffffff, 0x00000100,
- 0x2261, 0xffffffff, 0x06000100,
- 0x2286, 0xffffffff, 0x00000100,
- 0x24a8, 0xffffffff, 0x00000100,
- 0x30e0, 0xffffffff, 0x00000100,
- 0x22ca, 0xffffffff, 0x00000100,
- 0x2451, 0xffffffff, 0x00000100,
- 0x2362, 0xffffffff, 0x00000100,
- 0x2363, 0xffffffff, 0x00000100,
- 0x240c, 0xffffffff, 0x00000100,
- 0x240d, 0xffffffff, 0x00000100,
- 0x240e, 0xffffffff, 0x00000100,
- 0x240f, 0xffffffff, 0x00000100,
- 0x2b60, 0xffffffff, 0x00000100,
- 0x2b15, 0xffffffff, 0x00000100,
- 0x225f, 0xffffffff, 0x06000100,
- 0x261a, 0xffffffff, 0x00000100,
- 0x2544, 0xffffffff, 0x00000100,
- 0x2bc1, 0xffffffff, 0x00000100,
- 0x2b81, 0xffffffff, 0x00000100,
- 0x2527, 0xffffffff, 0x00000100,
- 0x200b, 0xffffffff, 0xe0000000,
+ mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+ mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+ mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
0x2458, 0xffffffff, 0x00010000,
0x2459, 0xffffffff, 0x00030002,
0x245a, 0xffffffff, 0x00040007,
@@ -712,55 +719,56 @@ static const u32 verde_mgcg_cgcg_init[] =
0x2496, 0xffffffff, 0x00100013,
0x2497, 0xffffffff, 0x00120011,
0x2498, 0xffffffff, 0x00150014,
- 0x2454, 0xffffffff, 0x96940200,
- 0x21c2, 0xffffffff, 0x00900100,
- 0x311e, 0xffffffff, 0x00000080,
- 0x3101, 0xffffffff, 0x0020003f,
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
+ mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+ mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
+ mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
0x000c, 0xffffffff, 0x0000001c,
0x000d, 0x000f0000, 0x000f0000,
0x0583, 0xffffffff, 0x00000100,
- 0x0409, 0xffffffff, 0x00000100,
- 0x040b, 0x00000101, 0x00000000,
- 0x082a, 0xffffffff, 0x00000104,
- 0x0993, 0x000c0000, 0x000c0000,
- 0x0992, 0x000c0000, 0x000c0000,
- 0x1579, 0xff000fff, 0x00000100,
+ mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+ mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
+ mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
+ mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
+ mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0x0bd4, 0x00000001, 0x00000001,
- 0x0c33, 0xc0000fff, 0x00000104,
- 0x3079, 0x00000001, 0x00000001,
+ mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
+ mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
- 0x3630, 0xfffffff0, 0x00000100
+ 0x3630, 0xfffffff0, 0x00000100,
};
+
static const u32 oland_mgcg_cgcg_init[] =
{
- 0x3100, 0xffffffff, 0xfffffffc,
- 0x200b, 0xffffffff, 0xe0000000,
- 0x2698, 0xffffffff, 0x00000100,
- 0x24a9, 0xffffffff, 0x00000100,
- 0x3059, 0xffffffff, 0x00000100,
- 0x25dd, 0xffffffff, 0x00000100,
- 0x2261, 0xffffffff, 0x06000100,
- 0x2286, 0xffffffff, 0x00000100,
- 0x24a8, 0xffffffff, 0x00000100,
- 0x30e0, 0xffffffff, 0x00000100,
- 0x22ca, 0xffffffff, 0x00000100,
- 0x2451, 0xffffffff, 0x00000100,
- 0x2362, 0xffffffff, 0x00000100,
- 0x2363, 0xffffffff, 0x00000100,
- 0x240c, 0xffffffff, 0x00000100,
- 0x240d, 0xffffffff, 0x00000100,
- 0x240e, 0xffffffff, 0x00000100,
- 0x240f, 0xffffffff, 0x00000100,
- 0x2b60, 0xffffffff, 0x00000100,
- 0x2b15, 0xffffffff, 0x00000100,
- 0x225f, 0xffffffff, 0x06000100,
- 0x261a, 0xffffffff, 0x00000100,
- 0x2544, 0xffffffff, 0x00000100,
- 0x2bc1, 0xffffffff, 0x00000100,
- 0x2b81, 0xffffffff, 0x00000100,
- 0x2527, 0xffffffff, 0x00000100,
- 0x200b, 0xffffffff, 0xe0000000,
+ mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+ mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+ mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
0x2458, 0xffffffff, 0x00010000,
0x2459, 0xffffffff, 0x00030002,
0x245a, 0xffffffff, 0x00040007,
@@ -791,55 +799,56 @@ static const u32 oland_mgcg_cgcg_init[] =
0x2473, 0xffffffff, 0x0000000b,
0x2474, 0xffffffff, 0x000a0009,
0x2475, 0xffffffff, 0x000d000c,
- 0x2454, 0xffffffff, 0x96940200,
- 0x21c2, 0xffffffff, 0x00900100,
- 0x311e, 0xffffffff, 0x00000080,
- 0x3101, 0xffffffff, 0x0020003f,
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
+ mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+ mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
+ mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
0x000c, 0xffffffff, 0x0000001c,
0x000d, 0x000f0000, 0x000f0000,
0x0583, 0xffffffff, 0x00000100,
- 0x0409, 0xffffffff, 0x00000100,
- 0x040b, 0x00000101, 0x00000000,
- 0x082a, 0xffffffff, 0x00000104,
- 0x0993, 0x000c0000, 0x000c0000,
- 0x0992, 0x000c0000, 0x000c0000,
- 0x1579, 0xff000fff, 0x00000100,
+ mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+ mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
+ mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
+ mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
+ mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
0x157a, 0x00000001, 0x00000001,
- 0x0bd4, 0x00000001, 0x00000001,
- 0x0c33, 0xc0000fff, 0x00000104,
- 0x3079, 0x00000001, 0x00000001,
+ mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
+ mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
- 0x3630, 0xfffffff0, 0x00000100
+ 0x3630, 0xfffffff0, 0x00000100,
};
+
static const u32 hainan_mgcg_cgcg_init[] =
{
- 0x3100, 0xffffffff, 0xfffffffc,
- 0x200b, 0xffffffff, 0xe0000000,
- 0x2698, 0xffffffff, 0x00000100,
- 0x24a9, 0xffffffff, 0x00000100,
- 0x3059, 0xffffffff, 0x00000100,
- 0x25dd, 0xffffffff, 0x00000100,
- 0x2261, 0xffffffff, 0x06000100,
- 0x2286, 0xffffffff, 0x00000100,
- 0x24a8, 0xffffffff, 0x00000100,
- 0x30e0, 0xffffffff, 0x00000100,
- 0x22ca, 0xffffffff, 0x00000100,
- 0x2451, 0xffffffff, 0x00000100,
- 0x2362, 0xffffffff, 0x00000100,
- 0x2363, 0xffffffff, 0x00000100,
- 0x240c, 0xffffffff, 0x00000100,
- 0x240d, 0xffffffff, 0x00000100,
- 0x240e, 0xffffffff, 0x00000100,
- 0x240f, 0xffffffff, 0x00000100,
- 0x2b60, 0xffffffff, 0x00000100,
- 0x2b15, 0xffffffff, 0x00000100,
- 0x225f, 0xffffffff, 0x06000100,
- 0x261a, 0xffffffff, 0x00000100,
- 0x2544, 0xffffffff, 0x00000100,
- 0x2bc1, 0xffffffff, 0x00000100,
- 0x2b81, 0xffffffff, 0x00000100,
- 0x2527, 0xffffffff, 0x00000100,
- 0x200b, 0xffffffff, 0xe0000000,
+ mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+ mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+ mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+ mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+ mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+ mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+ mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
0x2458, 0xffffffff, 0x00010000,
0x2459, 0xffffffff, 0x00030002,
0x245a, 0xffffffff, 0x00040007,
@@ -870,22 +879,22 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x2473, 0xffffffff, 0x0000000b,
0x2474, 0xffffffff, 0x000a0009,
0x2475, 0xffffffff, 0x000d000c,
- 0x2454, 0xffffffff, 0x96940200,
- 0x21c2, 0xffffffff, 0x00900100,
- 0x311e, 0xffffffff, 0x00000080,
- 0x3101, 0xffffffff, 0x0020003f,
+ mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
+ mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+ mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
+ mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
0x000c, 0xffffffff, 0x0000001c,
0x000d, 0x000f0000, 0x000f0000,
0x0583, 0xffffffff, 0x00000100,
0x0409, 0xffffffff, 0x00000100,
- 0x082a, 0xffffffff, 0x00000104,
- 0x0993, 0x000c0000, 0x000c0000,
- 0x0992, 0x000c0000, 0x000c0000,
- 0x0bd4, 0x00000001, 0x00000001,
- 0x0c33, 0xc0000fff, 0x00000104,
- 0x3079, 0x00000001, 0x00000001,
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
+ mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
+ mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
+ mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
+ mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+ mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
0x3430, 0xfffffff0, 0x00000100,
- 0x3630, 0xfffffff0, 0x00000100
+ 0x3630, 0xfffffff0, 0x00000100,
};
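[ Reviewer note: every *_mgcg_cgcg_init table is bracketed by two writes of
  0xe0000000 to mmGRBM_GFX_INDEX. Assuming the gfx 6.x register layout,
  that value is the union of the three broadcast bits, so the clock-gating
  writes in between reach every shader engine, shader array and instance:

	/* illustrative bit positions, per the SI (gfx 6.x) headers */
	#define SH_BROADCAST_WRITES		(1u << 29)
	#define INSTANCE_BROADCAST_WRITES	(1u << 30)
	#define SE_BROADCAST_WRITES		(1u << 31)
	/* 0xe0000000 == SE_ | INSTANCE_ | SH_BROADCAST_WRITES */
]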
static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
@@ -1001,24 +1010,81 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{PA_SC_RASTER_CONFIG, false, true},
};
-static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num,
- u32 reg_offset)
+static uint32_t si_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
{
- uint32_t val;
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
-
static int si_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
@@ -1030,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num,
continue;
if (!si_allowed_read_registers[i].untouched)
- *value = si_allowed_read_registers[i].grbm_indexed ?
- si_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = si_get_register_value(adev,
+ si_allowed_read_registers[i].grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
return -EINVAL;
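[ Reviewer note: after this refactor an indexed read of a cached register
  no longer touches GRBM_GFX_INDEX at all. A hypothetical call through the
  .read_register hook, using the broadcast se/sh selectors:

	u32 val;
	int r;

	/* 0xffffffff means "broadcast": si_get_register_value() maps it to
	 * index 0 and returns the cached rb_config[0][0].raster_config
	 * without any MMIO access or grbm_idx_mutex traffic. */
	r = si_read_register(adev, 0xffffffff, 0xffffffff,
			     mmPA_SC_RASTER_CONFIG, &val);
]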
@@ -1129,13 +1194,12 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
static void si_detect_hw_virtualization(struct amdgpu_device *adev)
{
if (is_virtual_machine()) /* passthrough mode */
- adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
- .detect_hw_virtualization = si_detect_hw_virtualization,
.read_register = &si_read_register,
.reset = &si_asic_reset,
.set_vga_state = &si_vga_set_state,
@@ -1852,6 +1916,8 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
int si_set_ip_blocks(struct amdgpu_device *adev)
{
+ si_detect_hw_virtualization(adev);
+
switch (adev->asic_type) {
case CHIP_VERDE:
case CHIP_TAHITI:
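[ Reviewer note: si_detect_hw_virtualization() moves from the (now removed)
  .detect_hw_virtualization asic callback to the top of si_set_ip_blocks(),
  so passthrough detection runs before any IP blocks are registered; the
  flag itself also moves from virtualization.virtual_caps to the new
  virt.caps field, as seen in the hunk above. ]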
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 3dd552ae0b59..3372a071bb85 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -24,7 +24,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
-#include "si/sid.h"
+#include "sid.h"
const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
@@ -301,7 +301,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
ib.ptr[3] = 0xDEADBEEF;
ib.length_dw = 4;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 6e150db8f380..f55e45b52fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -26,7 +26,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
-#include "si/sid.h"
+#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"
#include "atom.h"
@@ -3009,29 +3009,6 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev)
return ret;
}
-struct si_dpm_quirk {
- u32 chip_vendor;
- u32 chip_device;
- u32 subsys_vendor;
- u32 subsys_device;
- u32 max_sclk;
- u32 max_mclk;
-};
-
-/* cards with dpm stability problems */
-static struct si_dpm_quirk si_dpm_quirk_list[] = {
- /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
- { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
- { 0, 0, 0, 0 },
-};
-
static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
u16 vce_voltage)
{
@@ -3477,18 +3454,8 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
int i;
- struct si_dpm_quirk *p = si_dpm_quirk_list;
- /* limit all SI kickers */
- if (adev->asic_type == CHIP_PITCAIRN) {
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811) ||
- (adev->pdev->device == 0x6816) ||
- (adev->pdev->device == 0x6817) ||
- (adev->pdev->device == 0x6806))
- max_mclk = 120000;
- } else if (adev->asic_type == CHIP_HAINAN) {
+ if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0xC3) ||
@@ -3498,18 +3465,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
max_sclk = 75000;
}
}
- /* Apply dpm quirks */
- while (p && p->chip_device != 0) {
- if (adev->pdev->vendor == p->chip_vendor &&
- adev->pdev->device == p->chip_device &&
- adev->pdev->subsystem_vendor == p->subsys_vendor &&
- adev->pdev->subsystem_device == p->subsys_device) {
- max_sclk = p->max_sclk;
- max_mclk = p->max_mclk;
- break;
- }
- ++p;
- }
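[ Reviewer note: both halves of the quirk machinery are dropped together:
  the si_dpm_quirk_list table removed earlier in this file and the loop
  above that applied it, plus the blanket Pitcairn "kicker" mclk clamp.
  Only the Hainan revision check that remains still limits clocks. ]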
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
@@ -3906,25 +3861,25 @@ static int si_restrict_performance_levels_before_switch(struct amdgpu_device *ad
}
static int si_dpm_force_performance_level(struct amdgpu_device *adev,
- enum amdgpu_dpm_forced_level level)
+ enum amd_dpm_forced_level level)
{
struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
struct si_ps *ps = si_get_ps(rps);
u32 levels = ps->performance_level_count;
- if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+ if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
- } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+ } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
- } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+ } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
@@ -7746,7 +7701,7 @@ static int si_dpm_sw_init(void *handle)
/* default to balanced state */
adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
adev->pm.default_sclk = adev->clock.default_sclk;
adev->pm.default_mclk = adev->clock.default_mclk;
adev->pm.current_sclk = adev->clock.default_sclk;
@@ -8072,11 +8027,3 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}
-const struct amdgpu_ip_block_version si_dpm_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dpm_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index fde2086246fa..dc9e0e6b4558 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -143,8 +143,8 @@
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
-#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
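[ Reviewer note: the corrected values line up with the golden register
  tables earlier in the series: verde_golden_rlc_registers programs
  mmGB_ADDR_CONFIG to 0x02010002 and hainan_golden_registers2 programs it
  to 0x2011003, so the *_GB_ADDR_CONFIG_GOLDEN defines now agree with what
  the hardware is actually configured to. ]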
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index db0f36846661..81f90800ba73 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -23,7 +23,7 @@
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_ih.h"
-#include "si/sid.h"
+#include "sid.h"
#include "si_ih.h"
static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
index 668ba99d6c05..0726bc3b6f90 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c
@@ -25,7 +25,7 @@
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
-#include "si/sid.h"
+#include "sid.h"
#include "ppsmc.h"
#include "amdgpu_ucode.h"
#include "sislands_smc.h"
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index c57eff159374..c57eff159374 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
deleted file mode 100644
index 880152c0f775..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef SMU_UCODE_XFER_VI_H
-#define SMU_UCODE_XFER_VI_H
-
-#define SMU_DRAMData_TOC_VERSION 1
-#define MAX_IH_REGISTER_COUNT 65535
-#define SMU_DIGEST_SIZE_BYTES 20
-#define SMU_FB_SIZE_BYTES 1048576
-#define SMU_MAX_ENTRIES 12
-
-#define UCODE_ID_SMU 0
-#define UCODE_ID_SDMA0 1
-#define UCODE_ID_SDMA1 2
-#define UCODE_ID_CP_CE 3
-#define UCODE_ID_CP_PFP 4
-#define UCODE_ID_CP_ME 5
-#define UCODE_ID_CP_MEC 6
-#define UCODE_ID_CP_MEC_JT1 7
-#define UCODE_ID_CP_MEC_JT2 8
-#define UCODE_ID_GMCON_RENG 9
-#define UCODE_ID_RLC_G 10
-#define UCODE_ID_IH_REG_RESTORE 11
-#define UCODE_ID_VBIOS 12
-#define UCODE_ID_MISC_METADATA 13
-#define UCODE_ID_SMU_SK 14
-#define UCODE_ID_RLC_SCRATCH 32
-#define UCODE_ID_RLC_SRM_ARAM 33
-#define UCODE_ID_RLC_SRM_DRAM 34
-#define UCODE_ID_MEC_STORAGE 35
-#define UCODE_ID_VBIOS_PARAMETERS 36
-#define UCODE_META_DATA 0xFF
-
-#define UCODE_ID_SMU_MASK 0x00000001
-#define UCODE_ID_SDMA0_MASK 0x00000002
-#define UCODE_ID_SDMA1_MASK 0x00000004
-#define UCODE_ID_CP_CE_MASK 0x00000008
-#define UCODE_ID_CP_PFP_MASK 0x00000010
-#define UCODE_ID_CP_ME_MASK 0x00000020
-#define UCODE_ID_CP_MEC_MASK 0x00000040
-#define UCODE_ID_CP_MEC_JT1_MASK 0x00000080
-#define UCODE_ID_CP_MEC_JT2_MASK 0x00000100
-#define UCODE_ID_GMCON_RENG_MASK 0x00000200
-#define UCODE_ID_RLC_G_MASK 0x00000400
-#define UCODE_ID_IH_REG_RESTORE_MASK 0x00000800
-#define UCODE_ID_VBIOS_MASK 0x00001000
-
-#define UCODE_FLAG_UNHALT_MASK 0x1
-
-struct SMU_Entry {
-#ifndef __BIG_ENDIAN
- uint16_t id;
- uint16_t version;
- uint32_t image_addr_high;
- uint32_t image_addr_low;
- uint32_t meta_data_addr_high;
- uint32_t meta_data_addr_low;
- uint32_t data_size_byte;
- uint16_t flags;
- uint16_t num_register_entries;
-#else
- uint16_t version;
- uint16_t id;
- uint32_t image_addr_high;
- uint32_t image_addr_low;
- uint32_t meta_data_addr_high;
- uint32_t meta_data_addr_low;
- uint32_t data_size_byte;
- uint16_t num_register_entries;
- uint16_t flags;
-#endif
-};
-
-struct SMU_DRAMData_TOC {
- uint32_t structure_version;
- uint32_t num_entries;
- struct SMU_Entry entry[SMU_MAX_ENTRIES];
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 7fb9137dd89b..b34cefc7ebd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle)
uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- r = uvd_v4_2_start(adev);
- if (r)
- goto done;
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
@@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle)
amdgpu_ring_commit(ring);
done:
-
if (!r)
DRM_INFO("UVD initialized successfully.\n");
@@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
- uvd_v4_2_stop(adev);
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v4_2_stop(adev);
+
ring->ready = false;
return 0;
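[ Reviewer note: this pattern repeats for UVD v5 and v6 below: each
  *_stop() now finishes by writing 0 to mmUVD_STATUS, and hw_fini only
  calls *_stop() when mmUVD_STATUS reads back non-zero, so a block that is
  already stopped or power-gated is not stopped a second time. ]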
@@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t rb_bufsz;
int i, j, r;
+ u32 tmp;
/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;
- WREG32(mmUVD_CGC_GATE, 0);
- uvd_v4_2_set_dcm(adev, true);
-
- uvd_v4_2_mc_resume(adev);
+ /* set uvd busy */
+ WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
- /* disable interupt */
- WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
-
- /* Stall UMC and register bus before resetting VCPU */
- WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- mdelay(1);
-
- /* put LMI, VCPU, RBC etc... into reset */
- WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
- UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
- mdelay(5);
+ uvd_v4_2_set_dcm(adev, true);
+ WREG32(mmUVD_CGC_GATE, 0);
/* take UVD block out of reset */
WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
mdelay(5);
- /* initialize UVD memory controller */
- WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
- (1 << 21) | (1 << 9) | (1 << 20));
+ /* enable VCPU clock */
+ WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+
+ /* disable interrupt */
+ WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
@@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
#endif
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+ /* initialize UVD memory controller */
+ WREG32(mmUVD_LMI_CTRL, 0x203108);
+
+ tmp = RREG32(mmUVD_MPC_CNTL);
+ WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
@@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
WREG32(mmUVD_MPC_SET_ALU, 0);
WREG32(mmUVD_MPC_SET_MUX, 0x88);
- /* take all subblocks out of reset, except VCPU */
- WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
- mdelay(5);
+ uvd_v4_2_mc_resume(adev);
- /* enable VCPU clock */
- WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+ tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
+ WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
/* enable UMC */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
- /* boot up the VCPU */
- WREG32(mmUVD_SOFT_RESET, 0);
+ WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
+
+ WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+ WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
mdelay(10);
for (i = 0; i < 10; ++i) {
@@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* enable interupt */
WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
+ WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
+
/* force RBC into idle state */
WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -393,22 +389,57 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
*/
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
- /* force RBC into idle state */
+ uint32_t i, j;
+ uint32_t status;
+
WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(mmUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(1);
+ }
+ if (status & 2)
+ break;
+ }
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(mmUVD_LMI_STATUS);
+ if (status & 0xf)
+ break;
+ mdelay(1);
+ }
+ if (status & 0xf)
+ break;
+ }
+
/* Stall UMC and register bus before resetting VCPU */
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- mdelay(1);
- /* put VCPU into reset */
- WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
- mdelay(5);
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(mmUVD_LMI_STATUS);
+ if (status & 0x240)
+ break;
+ mdelay(1);
+ }
+ if (status & 0x240)
+ break;
+ }
- /* disable VCPU clock */
- WREG32(mmUVD_VCPU_CNTL, 0x0);
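+ /* NB (reviewer): the raw 0x3D49 below is presumably mmUVD_STATUS; this
+  * clears the busy bit (1 << 2) that uvd_v4_2_start() set above. */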
+ WREG32_P(0x3D49, 0, ~(1 << 2));
- /* Unstall UMC and register bus */
- WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+ WREG32(mmUVD_STATUS, 0);
uvd_v4_2_set_dcm(adev, false);
}
@@ -694,8 +725,26 @@ static int uvd_v4_2_set_powergating_state(void *handle,
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+ if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
+ CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
+ WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
+ UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
+ UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
+ mdelay(20);
+ }
+ }
return 0;
} else {
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+ if (RREG32_SMC(ixCURRENT_PG_STATUS) &
+ CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
+ WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
+ UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
+ UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
+ mdelay(30);
+ }
+ }
return uvd_v4_2_start(adev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 6de6becce745..ad8c02e423d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle)
uint32_t tmp;
int r;
- r = uvd_v5_0_start(adev);
- if (r)
- goto done;
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+ uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v5_0_enable_mgcg(adev, true);
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
@@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle)
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
+
done:
if (!r)
DRM_INFO("UVD initialized successfully.\n");
return r;
}
/**
@@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
- uvd_v5_0_stop(adev);
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v5_0_stop(adev);
+
ring->ready = false;
return 0;
@@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
uvd_v5_0_mc_resume(adev);
- amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
- uvd_v5_0_enable_mgcg(adev, true);
-
/* disable interupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev)
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ WREG32(mmUVD_STATUS, 0);
}
/**
@@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
if (enable) {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
@@ -822,16 +821,40 @@ static int uvd_v5_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
- return 0;
+ int ret = 0;
if (state == AMD_PG_STATE_GATE) {
uvd_v5_0_stop(adev);
- return 0;
} else {
- return uvd_v5_0_start(adev);
+ ret = uvd_v5_0_start(adev);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ mutex_lock(&adev->pm.mutex);
+
+ if (RREG32_SMC(ixCURRENT_PG_STATUS) &
+ CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
+ DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
+ goto out;
}
+
+ /* AMD_CG_SUPPORT_UVD_MGCG */
+ data = RREG32(mmUVD_CGC_CTRL);
+ if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
+ *flags |= AMD_CG_SUPPORT_UVD_MGCG;
+
+out:
+ mutex_unlock(&adev->pm.mutex);
}
static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
@@ -849,6 +872,7 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.soft_reset = uvd_v5_0_soft_reset,
.set_clockgating_state = uvd_v5_0_set_clockgating_state,
.set_powergating_state = uvd_v5_0_set_powergating_state,
+ .get_clockgating_state = uvd_v5_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ba0bbf7138dc..18a6de4e1512 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle)
uint32_t tmp;
int r;
- r = uvd_v6_0_start(adev);
- if (r)
- goto done;
+ amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+ uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v6_0_enable_mgcg(adev, true);
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
@@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
- uvd_v6_0_stop(adev);
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v6_0_stop(adev);
+
ring->ready = false;
return 0;
@@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
lmi_swap_cntl = 0;
mp_swap_cntl = 0;
- amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
- uvd_v6_0_enable_mgcg(adev, true);
uvd_v6_0_mc_resume(adev);
/* disable interupt */
@@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ WREG32(mmUVD_STATUS, 0);
}
/**
@@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
if (enable) {
/* wait for STATUS to clear */
if (uvd_v6_0_wait_for_idle(handle))
@@ -1047,18 +1045,42 @@ static int uvd_v6_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
- return 0;
+ int ret = 0;
WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
if (state == AMD_PG_STATE_GATE) {
uvd_v6_0_stop(adev);
- return 0;
} else {
- return uvd_v6_0_start(adev);
+ ret = uvd_v6_0_start(adev);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ mutex_lock(&adev->pm.mutex);
+
+ if (RREG32_SMC(ixCURRENT_PG_STATUS) &
+ CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
+ DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
+ goto out;
}
+
+ /* AMD_CG_SUPPORT_UVD_MGCG */
+ data = RREG32(mmUVD_CGC_CTRL);
+ if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
+ *flags |= AMD_CG_SUPPORT_UVD_MGCG;
+
+out:
+ mutex_unlock(&adev->pm.mutex);
}
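[ Reviewer note: uvd_v5_0 and uvd_v6_0 gain structurally identical
  get_clockgating_state() helpers: both take pm.mutex, bail out with a
  DRM_INFO when CURRENT_PG_STATUS reports the block power-gated (its
  registers cannot be read in that state), and otherwise report
  AMD_CG_SUPPORT_UVD_MGCG from UVD_CGC_CTRL's dynamic-clock-mode bit. ]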
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
@@ -1079,6 +1101,7 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.post_soft_reset = uvd_v6_0_post_soft_reset,
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
+ .get_clockgating_state = uvd_v6_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 38ed903dd6f8..9ea99348e493 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -42,10 +42,9 @@
#define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
-static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vce_v2_0_wait_for_idle(void *handle);
+
/**
* vce_v2_0_ring_get_rptr - get read pointer
*
@@ -140,6 +139,86 @@ static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
return -ETIMEDOUT;
}
+static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
+{
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
+}
+
+static void vce_v2_0_init_cg(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp &= ~0xfff;
+ tmp |= ((0 << 0) | (4 << 4));
+ tmp |= 0x40000;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0xfff;
+ tmp |= ((0 << 0) | (4 << 4));
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0x10;
+ tmp &= ~0x100000;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+}
+
+static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
+{
+ uint64_t addr = adev->vce.gpu_addr;
+ uint32_t size;
+
+ WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+ WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+ WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+ WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+
+ WREG32(mmVCE_LMI_CTRL, 0x00398000);
+ WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+ WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+ WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+ WREG32(mmVCE_LMI_VM_CTRL, 0);
+
+ addr += AMDGPU_VCE_FIRMWARE_OFFSET;
+ size = VCE_V2_0_FW_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = VCE_V2_0_STACK_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = VCE_V2_0_DATA_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+ WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
+}
+
+static bool vce_v2_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+}
+
+static int vce_v2_0_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ unsigned i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (vce_v2_0_is_idle(handle))
+ return 0;
+ }
+ return -ETIMEDOUT;
+}
+
/**
* vce_v2_0_start - start VCE block
*
@@ -152,11 +231,14 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- vce_v2_0_mc_resume(adev);
-
/* set BUSY flag */
WREG32_P(mmVCE_STATUS, 1, ~1);
+ vce_v2_0_init_cg(adev);
+ vce_v2_0_disable_cg(adev);
+
+ vce_v2_0_mc_resume(adev);
+
ring = &adev->vce.ring[0];
WREG32(mmVCE_RB_RPTR, ring->wptr);
WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -189,6 +271,145 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
return 0;
}
+static int vce_v2_0_stop(struct amdgpu_device *adev)
+{
+ int i, j;
+ int status;
+
+ if (vce_v2_0_lmi_clean(adev)) {
+ DRM_INFO("vce is not idle \n");
+ return 0;
+ }
+/*
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(mmVCE_FW_REG_STATUS);
+ if (!(status & 1))
+ break;
+ mdelay(1);
+ }
+ break;
+ }
+*/
+ if (vce_v2_0_wait_for_idle(adev)) {
+ DRM_INFO("VCE is busy, Can't set clock gateing");
+ return 0;
+ }
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(mmVCE_LMI_STATUS);
+ if (status & 0x240)
+ break;
+ mdelay(1);
+ }
+ break;
+ }
+
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);
+
+ WREG32(mmVCE_STATUS, 0);
+
+ return 0;
+}
+
+static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
+{
+ u32 tmp;
+
+ if (gated) {
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0xe70000;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp |= 0xff000000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3fc;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+ } else {
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0xe7;
+ tmp &= ~0xe70000;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp |= 0x1fe000;
+ tmp &= ~0xff000000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp |= 0x3fc;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ }
+}
+
+static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
+{
+ u32 orig, tmp;
+
+/* LMI_MC/LMI_UMC always set in dynamic,
+ * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
+ */
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp &= ~0x00060006;
+
+/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
+ if (gated) {
+ tmp |= 0xe10000;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+ } else {
+ tmp |= 0xe1;
+ tmp &= ~0xe10000;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+ }
+
+ orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0x1fe000;
+ tmp &= ~0xff000000;
+ if (tmp != orig)
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3fc;
+ if (tmp != orig)
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+
+ /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
+
+ if (gated)
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+}
+
+static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
+ bool sw_cg)
+{
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
+ if (sw_cg)
+ vce_v2_0_set_sw_cg(adev, true);
+ else
+ vce_v2_0_set_dyn_cg(adev, true);
+ } else {
+ vce_v2_0_disable_cg(adev);
+
+ if (sw_cg)
+ vce_v2_0_set_sw_cg(adev, false);
+ else
+ vce_v2_0_set_dyn_cg(adev, false);
+ }
+}
+
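+/* With the extra sw_cg parameter, the two call sites added by this patch
+ * select the gating style explicitly; a minimal sketch of both paths
+ * (the wrapper name is hypothetical):
+ *
+ *	static void example_vce_v2_0_cg_paths(struct amdgpu_device *adev)
+ *	{
+ *		// hw_init path later in this patch: dynamic gating
+ *		vce_v2_0_enable_mgcg(adev, true, false);
+ *
+ *		// set_clockgating_state(AMD_CG_STATE_GATE): software gating
+ *		vce_v2_0_enable_mgcg(adev, true, true);
+ *	}
+ */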
static int vce_v2_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -254,11 +475,8 @@ static int vce_v2_0_hw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vce_v2_0_start(adev);
- /* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
- if (r)
- return 0;
-
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+ vce_v2_0_enable_mgcg(adev, true, false);
for (i = 0; i < adev->vce.num_rings; i++)
adev->vce.ring[i].ready = false;
@@ -312,190 +530,6 @@ static int vce_v2_0_resume(void *handle)
return r;
}
-static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
-{
- u32 tmp;
-
- if (gated) {
- tmp = RREG32(mmVCE_CLOCK_GATING_B);
- tmp |= 0xe70000;
- WREG32(mmVCE_CLOCK_GATING_B, tmp);
-
- tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
- tmp |= 0xff000000;
- WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
- tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
- tmp &= ~0x3fc;
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
-
- WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
- } else {
- tmp = RREG32(mmVCE_CLOCK_GATING_B);
- tmp |= 0xe7;
- tmp &= ~0xe70000;
- WREG32(mmVCE_CLOCK_GATING_B, tmp);
-
- tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
- tmp |= 0x1fe000;
- tmp &= ~0xff000000;
- WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
- tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
- tmp |= 0x3fc;
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
- }
-}
-
-static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
-{
- if (vce_v2_0_wait_for_idle(adev)) {
- DRM_INFO("VCE is busy, Can't set clock gateing");
- return;
- }
-
- WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
-
- if (vce_v2_0_lmi_clean(adev)) {
- DRM_INFO("LMI is busy, Can't set clock gateing");
- return;
- }
-
- WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- WREG32(mmVCE_STATUS, 0);
-
- if (gated)
- WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
- /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
- if (gated) {
- /* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
- WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
- } else {
- /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
- WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
- }
-
- /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/;
- WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
-
- /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
-
- WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
- if(!gated) {
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
- mdelay(100);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
- vce_v2_0_firmware_loaded(adev);
- WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
- }
-}
-
-static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
-{
- WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
-}
-
-static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
-{
- bool sw_cg = false;
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
- if (sw_cg)
- vce_v2_0_set_sw_cg(adev, true);
- else
- vce_v2_0_set_dyn_cg(adev, true);
- } else {
- vce_v2_0_disable_cg(adev);
-
- if (sw_cg)
- vce_v2_0_set_sw_cg(adev, false);
- else
- vce_v2_0_set_dyn_cg(adev, false);
- }
-}
-
-static void vce_v2_0_init_cg(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- tmp = RREG32(mmVCE_CLOCK_GATING_A);
- tmp &= ~0xfff;
- tmp |= ((0 << 0) | (4 << 4));
- tmp |= 0x40000;
- WREG32(mmVCE_CLOCK_GATING_A, tmp);
-
- tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
- tmp &= ~0xfff;
- tmp |= ((0 << 0) | (4 << 4));
- WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
-
- tmp = RREG32(mmVCE_CLOCK_GATING_B);
- tmp |= 0x10;
- tmp &= ~0x100000;
- WREG32(mmVCE_CLOCK_GATING_B, tmp);
-}
-
-static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
-{
- uint64_t addr = adev->vce.gpu_addr;
- uint32_t size;
-
- WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
- WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
- WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
- WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
-
- WREG32(mmVCE_LMI_CTRL, 0x00398000);
- WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
- WREG32(mmVCE_LMI_SWAP_CNTL, 0);
- WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
- WREG32(mmVCE_LMI_VM_CTRL, 0);
-
- addr += AMDGPU_VCE_FIRMWARE_OFFSET;
- size = VCE_V2_0_FW_SIZE;
- WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
- WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
-
- addr += size;
- size = VCE_V2_0_STACK_SIZE;
- WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
- WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
-
- addr += size;
- size = VCE_V2_0_DATA_SIZE;
- WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
- WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
-
- WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
- WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
-
- vce_v2_0_init_cg(adev);
-}
-
-static bool vce_v2_0_is_idle(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
-}
-
-static int vce_v2_0_wait_for_idle(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (vce_v2_0_is_idle(handle))
- return 0;
- }
- return -ETIMEDOUT;
-}
-
static int vce_v2_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -539,33 +573,20 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
- if (enable)
- tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
- else
- tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-
static int vce_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-
+ bool sw_cg = false;
- vce_v2_0_set_bypass_mode(adev, enable);
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (state == AMD_CG_STATE_GATE)
+ if (state == AMD_CG_STATE_GATE) {
gate = true;
+ sw_cg = true;
+ }
- vce_v2_0_enable_mgcg(adev, gate);
+ vce_v2_0_enable_mgcg(adev, gate, sw_cg);
return 0;
}
@@ -582,12 +603,8 @@ static int vce_v2_0_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
- return 0;
-
if (state == AMD_PG_STATE_GATE)
- /* XXX do we need a vce_v2_0_stop()? */
- return 0;
+ return vce_v2_0_stop(adev);
else
return vce_v2_0_start(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 37ca685e5a9a..93ec8815bb13 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -432,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vce_v3_0_start(adev);
- if (r)
- return r;
+ vce_v3_0_override_vce_clock_gating(adev, true);
+ if (!(adev->flags & AMD_IS_APU))
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
for (i = 0; i < adev->vce.num_rings; i++)
adev->vce.ring[i].ready = false;
@@ -510,6 +510,8 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
WREG32(mmVCE_LMI_SWAP_CNTL, 0);
WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
WREG32(mmVCE_LMI_VM_CTRL, 0);
+ WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);
+
if (adev->asic_type >= CHIP_STONEY) {
WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
@@ -708,18 +710,6 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
- if (enable)
- tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
- else
- tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -727,11 +717,6 @@ static int vce_v3_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_TONGA) ||
- (adev->asic_type == CHIP_FIJI))
- vce_v3_0_set_bypass_mode(adev, enable);
-
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
@@ -777,15 +762,44 @@ static int vce_v3_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 0;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
- return 0;
+ if (state == AMD_PG_STATE_GATE) {
+ ret = vce_v3_0_stop(adev);
+ if (ret)
+ goto out;
+ } else {
+ ret = vce_v3_0_start(adev);
+ if (ret)
+ goto out;
+ }
- if (state == AMD_PG_STATE_GATE)
- /* XXX do we need a vce_v3_0_stop()? */
- return 0;
- else
- return vce_v3_0_start(adev);
+out:
+ return ret;
+}
+
+static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ mutex_lock(&adev->pm.mutex);
+
+ if (RREG32_SMC(ixCURRENT_PG_STATUS) &
+ CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
+ DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
+ goto out;
+ }
+
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+
+ /* AMD_CG_SUPPORT_VCE_MGCG */
+ data = RREG32(mmVCE_CLOCK_GATING_A);
+ if (data & (0x04 << 4))
+ *flags |= AMD_CG_SUPPORT_VCE_MGCG;
+
+out:
+ mutex_unlock(&adev->pm.mutex);
}
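A minimal consumer sketch for the new callback, assuming the flags convention above (names prefixed example_ are hypothetical):

	static void example_dump_vce_cg(struct amdgpu_device *adev)
	{
		u32 flags = 0;

		vce_v3_0_get_clockgating_state(adev, &flags);
		DRM_INFO("VCE MGCG is %s\n",
			 (flags & AMD_CG_SUPPORT_VCE_MGCG) ?
			 "active" : "inactive");
	}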
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
@@ -839,6 +853,7 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.post_soft_reset = vce_v3_0_post_soft_reset,
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
+ .get_clockgating_state = vce_v3_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index c2ac54f11341..50bdb24ef8d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -20,9 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/firmware.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
@@ -78,17 +76,7 @@
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
-
-MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
-MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
-MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
-MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
-MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
-MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+#include "mxgpu_vi.h"
/*
* Indirect registers accessor
@@ -285,6 +273,12 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
/* Some of the registers might be dependent on GRBM_GFX_INDEX */
mutex_lock(&adev->grbm_idx_mutex);
+ if (amdgpu_sriov_vf(adev)) {
+ xgpu_vi_init_golden_registers(adev);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return;
+ }
+
switch (adev->asic_type) {
case CHIP_TOPAZ:
amdgpu_program_register_sequence(adev,
@@ -458,14 +452,14 @@ static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
/* bit0: 0 means pf and 1 means vf */
/* bit31: 0 means disable IOV and 1 means enable */
if (reg & 1)
- adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
if (reg & 0x80000000)
- adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
if (reg == 0) {
if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
- adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
}
@@ -727,6 +721,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
/* enable BM */
pci_set_master(adev->pdev);
+ adev->has_hw_reset = true;
return 0;
}
udelay(1);
@@ -801,7 +796,37 @@ static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
- /* todo */
+ int r, i;
+ struct atom_clock_dividers dividers;
+ u32 tmp;
+
+ r = amdgpu_atombios_get_clock_dividers(adev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ ecclk, false, &dividers);
+ if (r)
+ return r;
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ tmp = RREG32_SMC(ixCG_ECLK_CNTL);
+ tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
+ CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
+ tmp |= dividers.post_divider;
+ WREG32_SMC(ixCG_ECLK_CNTL, tmp);
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
return 0;
}
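The same 100 x 10 ms status poll appears before and after the divider write; factored out, it would look like this (the helper name is hypothetical):

	static int example_wait_eclk_status(struct amdgpu_device *adev)
	{
		int i;

		for (i = 0; i < 100; i++) {
			if (RREG32_SMC(ixCG_ECLK_STATUS) &
			    CG_ECLK_STATUS__ECLK_STATUS_MASK)
				return 0;
			mdelay(10);
		}
		return -ETIMEDOUT;
	}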
@@ -869,7 +894,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
.read_bios_from_rom = &vi_read_bios_from_rom,
- .detect_hw_virtualization = vi_detect_hw_virtualization,
.read_register = &vi_read_register,
.reset = &vi_asic_reset,
.set_vga_state = &vi_vga_set_state,
@@ -905,6 +929,11 @@ static int vi_common_early_init(void *handle)
(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
smc_enabled = true;
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_setting(adev);
+ xgpu_vi_mailbox_set_irq_funcs(adev);
+ }
+
adev->rev_id = vi_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
@@ -1061,10 +1090,6 @@ static int vi_common_early_init(void *handle)
return -EINVAL;
}
- /* in early init stage, vbios code won't work */
- if (adev->asic_funcs->detect_hw_virtualization)
- amdgpu_asic_detect_hw_virtualization(adev);
-
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;
@@ -1073,8 +1098,23 @@ static int vi_common_early_init(void *handle)
return 0;
}
+static int vi_common_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_vi_mailbox_get_irq(adev);
+
+ return 0;
+}
+
static int vi_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_vi_mailbox_add_irq_id(adev);
+
return 0;
}
@@ -1106,6 +1146,9 @@ static int vi_common_hw_fini(void *handle)
/* enable the doorbell aperture */
vi_enable_doorbell_aperture(adev, false);
+ if (amdgpu_sriov_vf(adev))
+ xgpu_vi_mailbox_put_irq(adev);
+
return 0;
}
@@ -1190,6 +1233,23 @@ static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
WREG32(mmHDP_MEM_POWER_LS, data);
}
+static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t temp, data;
+
+ temp = data = RREG32(0x157a);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
+ data |= 1;
+ else
+ data &= ~1;
+
+ if (temp != data)
+ WREG32(0x157a, data);
+}
+
+
static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -1350,6 +1410,8 @@ static int vi_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE ? true : false);
vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
+ vi_update_drm_light_sleep(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_TONGA:
case CHIP_POLARIS10:
@@ -1368,10 +1430,36 @@ static int vi_common_set_powergating_state(void *handle,
return 0;
}
+static void vi_common_get_clockgating_state(void *handle, u32 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_PCIE(ixPCIE_CNTL2);
+ if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+
+ /* AMD_CG_SUPPORT_HDP_LS */
+ data = RREG32(mmHDP_MEM_POWER_LS);
+ if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ data = RREG32(mmHDP_HOST_PATH_CNTL);
+ if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_ROM_MGCG */
+ data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
+ if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+ *flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
static const struct amd_ip_funcs vi_common_ip_funcs = {
.name = "vi_common",
.early_init = vi_common_early_init,
- .late_init = NULL,
+ .late_init = vi_common_late_init,
.sw_init = vi_common_sw_init,
.sw_fini = vi_common_sw_fini,
.hw_init = vi_common_hw_init,
@@ -1383,6 +1471,7 @@ static const struct amd_ip_funcs vi_common_ip_funcs = {
.soft_reset = vi_common_soft_reset,
.set_clockgating_state = vi_common_set_clockgating_state,
.set_powergating_state = vi_common_set_powergating_state,
+ .get_clockgating_state = vi_common_get_clockgating_state,
};
static const struct amdgpu_ip_block_version vi_common_ip_block =
@@ -1396,6 +1485,12 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
+ /* in early init stage, vbios code won't work */
+ vi_detect_hw_virtualization(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.ops = &xgpu_vi_virt_ops;
+
switch (adev->asic_type) {
case CHIP_TOPAZ:
/* topaz has no DCE, UVD, VCE */
@@ -1413,28 +1508,32 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
- if (adev->enable_virtual_display)
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
else
amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev)) {
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ }
break;
case CHIP_TONGA:
amdgpu_ip_block_add(adev, &vi_common_ip_block);
amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
- if (adev->enable_virtual_display)
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
else
amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev)) {
+ amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ }
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 575d7aed5d32..719587b8b0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -28,4 +28,116 @@ void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
+struct amdgpu_ce_ib_state
+{
+ uint32_t ce_ib_completion_status;
+ uint32_t ce_constegnine_count;
+ uint32_t ce_ibOffset_ib1;
+ uint32_t ce_ibOffset_ib2;
+}; /* Total of 4 DWORD */
+
+struct amdgpu_de_ib_state
+{
+ uint32_t ib_completion_status;
+ uint32_t de_constEngine_count;
+ uint32_t ib_offset_ib1;
+ uint32_t ib_offset_ib2;
+ uint32_t preamble_begin_ib1;
+ uint32_t preamble_begin_ib2;
+ uint32_t preamble_end_ib1;
+ uint32_t preamble_end_ib2;
+ uint32_t draw_indirect_baseLo;
+ uint32_t draw_indirect_baseHi;
+ uint32_t disp_indirect_baseLo;
+ uint32_t disp_indirect_baseHi;
+ uint32_t gds_backup_addrlo;
+ uint32_t gds_backup_addrhi;
+ uint32_t index_base_addrlo;
+ uint32_t index_base_addrhi;
+ uint32_t sample_cntl;
+}; /* Total of 17 DWORD */
+
+struct amdgpu_ce_ib_state_chained_ib
+{
+ /* section of non chained ib part */
+ uint32_t ce_ib_completion_status;
+ uint32_t ce_constegnine_count;
+ uint32_t ce_ibOffset_ib1;
+ uint32_t ce_ibOffset_ib2;
+
+ /* section of chained ib */
+ uint32_t ce_chainib_addrlo_ib1;
+ uint32_t ce_chainib_addrlo_ib2;
+ uint32_t ce_chainib_addrhi_ib1;
+ uint32_t ce_chainib_addrhi_ib2;
+ uint32_t ce_chainib_size_ib1;
+ uint32_t ce_chainib_size_ib2;
+}; /* total 10 DWORD */
+
+struct amdgpu_de_ib_state_chained_ib
+{
+ /* section of non chained ib part */
+ uint32_t ib_completion_status;
+ uint32_t de_constEngine_count;
+ uint32_t ib_offset_ib1;
+ uint32_t ib_offset_ib2;
+
+ /* section of chained ib */
+ uint32_t chain_ib_addrlo_ib1;
+ uint32_t chain_ib_addrlo_ib2;
+ uint32_t chain_ib_addrhi_ib1;
+ uint32_t chain_ib_addrhi_ib2;
+ uint32_t chain_ib_size_ib1;
+ uint32_t chain_ib_size_ib2;
+
+ /* section of non chained ib part */
+ uint32_t preamble_begin_ib1;
+ uint32_t preamble_begin_ib2;
+ uint32_t preamble_end_ib1;
+ uint32_t preamble_end_ib2;
+
+ /* section of chained ib */
+ uint32_t chain_ib_pream_addrlo_ib1;
+ uint32_t chain_ib_pream_addrlo_ib2;
+ uint32_t chain_ib_pream_addrhi_ib1;
+ uint32_t chain_ib_pream_addrhi_ib2;
+
+ /* section of non chained ib part */
+ uint32_t draw_indirect_baseLo;
+ uint32_t draw_indirect_baseHi;
+ uint32_t disp_indirect_baseLo;
+ uint32_t disp_indirect_baseHi;
+ uint32_t gds_backup_addrlo;
+ uint32_t gds_backup_addrhi;
+ uint32_t index_base_addrlo;
+ uint32_t index_base_addrhi;
+ uint32_t sample_cntl;
+}; /* Total of 27 DWORD */
+
+struct amdgpu_gfx_meta_data
+{
+ /* 4 DWORD, address must be 4KB aligned */
+ struct amdgpu_ce_ib_state ce_payload;
+ uint32_t reserved1[60];
+ /* 17 DWORD, address must be 64B aligned */
+ struct amdgpu_de_ib_state de_payload;
+ /* PFP IB base address which gets preempted */
+ uint32_t DeIbBaseAddrLo;
+ uint32_t DeIbBaseAddrHi;
+ uint32_t reserved2[941];
+}; /* Total of 4K Bytes */
+
+struct amdgpu_gfx_meta_data_chained_ib
+{
+ /* 10 DWORD, address must be 4KB aligned */
+ struct amdgpu_ce_ib_state_chained_ib ce_payload;
+ uint32_t reserved1[54];
+ /* 27 DWORD, address must be 64B aligned */
+ struct amdgpu_de_ib_state_chained_ib de_payload;
+ /* PFP IB base address which gets preempted */
+ uint32_t DeIbBaseAddrLo;
+ uint32_t DeIbBaseAddrHi;
+ uint32_t reserved2[931];
+}; /* Total of 4K Bytes */
+
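+/* The reserved arrays are sized so each layout fills exactly one 4 KB
+ * page (4 + 60 + 17 + 2 + 941 = 1024 DWORDs, and 10 + 54 + 27 + 2 + 931
+ * = 1024). A compile-time check of that invariant, as a sketch:
+ *
+ *	static inline void example_gfx_meta_data_size_check(void)
+ *	{
+ *		BUILD_BUG_ON(sizeof(struct amdgpu_ce_ib_state) != 4 * 4);
+ *		BUILD_BUG_ON(sizeof(struct amdgpu_gfx_meta_data) != 4096);
+ *		BUILD_BUG_ON(sizeof(struct amdgpu_gfx_meta_data_chained_ib)
+ *			     != 4096);
+ *	}
+ */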
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
index fc120ba18aad..c43e03fddfba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
@@ -29,8 +29,4 @@ int cz_smu_init(struct amdgpu_device *adev);
int cz_smu_start(struct amdgpu_device *adev);
int cz_smu_fini(struct amdgpu_device *adev);
-extern const struct amd_ip_funcs tonga_dpm_ip_funcs;
-extern const struct amd_ip_funcs fiji_dpm_ip_funcs;
-extern const struct amd_ip_funcs iceland_dpm_ip_funcs;
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 11746f22d0c5..7a3863a45f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -360,6 +360,8 @@
#define PACKET3_WAIT_ON_CE_COUNTER 0x86
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SWITCH_BUFFER 0x8B
+#define PACKET3_SET_RESOURCES 0xA0
+#define PACKET3_MAP_QUEUES 0xA2
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index ee3e04e10dae..6316aad43a73 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -486,7 +486,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
return status;
}
-static int kfd_ioctl_dbg_unrgesiter(struct file *filep,
+static int kfd_ioctl_dbg_unregister(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_dbg_unregister_args *args = data;
@@ -498,7 +498,7 @@ static int kfd_ioctl_dbg_unrgesiter(struct file *filep,
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
- pr_debug("kfd_ioctl_dbg_unrgesiter not supported on CZ\n");
+ pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
return -EINVAL;
}
@@ -892,7 +892,7 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
kfd_ioctl_dbg_register, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
- kfd_ioctl_dbg_unrgesiter, 0),
+ kfd_ioctl_dbg_unregister, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
kfd_ioctl_dbg_address_watch, 0),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index a6a4b2b1c0d9..6a3470f84998 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -739,8 +739,10 @@ int kfd_wait_on_events(struct kfd_process *p,
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
- sizeof(struct kfd_event_data)))
+ sizeof(struct kfd_event_data))) {
+ ret = -EFAULT;
goto fail;
+ }
ret = init_event_waiter(p, &event_waiters[i],
event_data.event_id, i);
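copy_from_user() returns the number of bytes left uncopied, so any nonzero result now maps to -EFAULT instead of leaking a stale ret. The canonical pattern in isolation (hypothetical helper):

	static int example_copy_event_data(struct kfd_event_data *dst,
					   const void __user *src)
	{
		if (copy_from_user(dst, src, sizeof(*dst)))
			return -EFAULT;
		return 0;
	}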
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 85f358764bbc..43f45adeccd1 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -80,6 +80,18 @@ enum amd_clockgating_state {
AMD_CG_STATE_UNGATE,
};
+enum amd_dpm_forced_level {
+ AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
+ AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
+ AMD_DPM_FORCED_LEVEL_LOW = 0x4,
+ AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
+ AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
+ AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
+};
+
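+/* Since each forced level is a distinct bit, related levels can be
+ * tested as a group; for instance, a hypothetical mask covering the
+ * profiling modes:
+ *
+ *	#define EXAMPLE_PROFILE_MODE_MASK \
+ *		(AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | \
+ *		 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | \
+ *		 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | \
+ *		 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ */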
enum amd_powergating_state {
AMD_PG_STATE_GATE = 0,
AMD_PG_STATE_UNGATE,
@@ -206,6 +218,8 @@ struct amd_ip_funcs {
/* enable/disable pg for the IP block */
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
+ /* get current clockgating status */
+ void (*get_clockgating_state)(void *handle, u32 *flags);
};
#endif /* __AMD_SHARED_H__ */
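A sketch of how a core caller might aggregate the new per-block callback, assuming the amdgpu_ip_block layout used elsewhere in this series:

	static u32 example_collect_cg_flags(struct amdgpu_device *adev)
	{
		u32 flags = 0;
		int i;

		for (i = 0; i < adev->num_ip_blocks; i++) {
			const struct amd_ip_funcs *funcs =
				adev->ip_blocks[i].version->funcs;

			if (funcs->get_clockgating_state)
				funcs->get_clockgating_state(adev, &flags);
		}
		return flags;
	}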
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h
index 95570dbd18bb..813957a17a2d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h
@@ -4552,6 +4552,14 @@
#define mmDP4_DP_DPHY_PRBS_CNTL 0x4eb5
#define mmDP5_DP_DPHY_PRBS_CNTL 0x4fb5
#define mmDP6_DP_DPHY_PRBS_CNTL 0x54b5
+#define mmDP_DPHY_SCRAM_CNTL 0x4ab6
+#define mmDP0_DP_DPHY_SCRAM_CNTL 0x4ab6
+#define mmDP1_DP_DPHY_SCRAM_CNTL 0x4bb6
+#define mmDP2_DP_DPHY_SCRAM_CNTL 0x4cb6
+#define mmDP3_DP_DPHY_SCRAM_CNTL 0x4db6
+#define mmDP4_DP_DPHY_SCRAM_CNTL 0x4eb6
+#define mmDP5_DP_DPHY_SCRAM_CNTL 0x4fb6
+#define mmDP6_DP_DPHY_SCRAM_CNTL 0x54b6
#define mmDP_DPHY_CRC_EN 0x4ab7
#define mmDP0_DP_DPHY_CRC_EN 0x4ab7
#define mmDP1_DP_DPHY_CRC_EN 0x4bb7
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
index 8a75eb9d732b..c755f43aaaf8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h
@@ -8690,6 +8690,10 @@
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
#define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x1
#define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x10
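The new mask/shift pairs follow the usual read-modify-write idiom; a sketch programming the scrambler BS count (function name and count value are hypothetical):

	static void example_set_scrambler_bs_count(struct amdgpu_device *adev,
						   u32 count)
	{
		u32 tmp = RREG32(mmDP_DPHY_SCRAM_CNTL);

		tmp &= ~DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK;
		tmp |= (count <<
			DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT) &
		       DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK;
		WREG32(mmDP_DPHY_SCRAM_CNTL, tmp);
	}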
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h
index c39234ecedd0..6df651a94b0a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h
@@ -4544,6 +4544,15 @@
#define mmDP6_DP_DPHY_PRBS_CNTL 0x54b5
#define mmDP7_DP_DPHY_PRBS_CNTL 0x56b5
#define mmDP8_DP_DPHY_PRBS_CNTL 0x57b5
+#define mmDP_DPHY_SCRAM_CNTL 0x4ab6
+#define mmDP0_DP_DPHY_SCRAM_CNTL 0x4ab6
+#define mmDP1_DP_DPHY_SCRAM_CNTL 0x4bb6
+#define mmDP2_DP_DPHY_SCRAM_CNTL 0x4cb6
+#define mmDP3_DP_DPHY_SCRAM_CNTL 0x4db6
+#define mmDP4_DP_DPHY_SCRAM_CNTL 0x4eb6
+#define mmDP5_DP_DPHY_SCRAM_CNTL 0x4fb6
+#define mmDP6_DP_DPHY_SCRAM_CNTL 0x54b6
+#define mmDP8_DP_DPHY_SCRAM_CNTL 0x56b6
#define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4adc
#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4adc
#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4bdc
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
index a438c2b6e280..14a3bacfcfd1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h
@@ -6004,6 +6004,8 @@
#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0xc
#define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x1
#define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x2
+#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x4
#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
#define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x8
@@ -8364,6 +8366,10 @@
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x3ff
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x8000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
index 09a7df17570d..367b191d49fb 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
@@ -5776,6 +5776,15 @@
#define mmDP6_DP_DPHY_PRBS_CNTL 0x54b5
#define mmDP7_DP_DPHY_PRBS_CNTL 0x56b5
#define mmDP8_DP_DPHY_PRBS_CNTL 0x57b5
+#define mmDP_DPHY_SCRAM_CNTL 0x4ab6
+#define mmDP0_DP_DPHY_SCRAM_CNTL 0x4ab6
+#define mmDP1_DP_DPHY_SCRAM_CNTL 0x4bb6
+#define mmDP2_DP_DPHY_SCRAM_CNTL 0x4cb6
+#define mmDP3_DP_DPHY_SCRAM_CNTL 0x4db6
+#define mmDP4_DP_DPHY_SCRAM_CNTL 0x4eb6
+#define mmDP5_DP_DPHY_SCRAM_CNTL 0x4fb6
+#define mmDP6_DP_DPHY_SCRAM_CNTL 0x54b6
+#define mmDP8_DP_DPHY_SCRAM_CNTL 0x56b6
#define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4adc
#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4adc
#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4bdc
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
index 1ddc4183a1c9..106094ed0661 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
@@ -7088,6 +7088,8 @@
#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0xc
#define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x1
#define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x2
+#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x4
#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
#define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x8
@@ -9626,6 +9628,10 @@
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x3ff
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x8000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
index d3ccf5a86de0..93d84a475134 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
@@ -3920,6 +3920,14 @@
#define mmDP4_DP_DPHY_PRBS_CNTL 0x48d4
#define mmDP5_DP_DPHY_PRBS_CNTL 0x4bd4
#define mmDP6_DP_DPHY_PRBS_CNTL 0x4ed4
+#define mmDP_DPHY_SCRAM_CNTL 0x1cd5
+#define mmDP0_DP_DPHY_SCRAM_CNTL 0x1cd5
+#define mmDP1_DP_DPHY_SCRAM_CNTL 0x1fd5
+#define mmDP2_DP_DPHY_SCRAM_CNTL 0x42d5
+#define mmDP3_DP_DPHY_SCRAM_CNTL 0x45d5
+#define mmDP4_DP_DPHY_SCRAM_CNTL 0x48d5
+#define mmDP5_DP_DPHY_SCRAM_CNTL 0x4bd5
+#define mmDP6_DP_DPHY_SCRAM_CNTL 0x4ed5
#define mmDP_DPHY_CRC_EN 0x1cd6
#define mmDP0_DP_DPHY_CRC_EN 0x1cd6
#define mmDP1_DP_DPHY_CRC_EN 0x1fd6
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
index c331c9fe7b81..9b6825b74cc1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
@@ -9214,6 +9214,10 @@
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00
+#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
#define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x1
#define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x10
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
deleted file mode 100644
index 895c8e2353e3..000000000000
--- a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2010 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-#ifndef __SI_REG_H__
-#define __SI_REG_H__
-
-/* SI */
-#define SI_DC_GPIO_HPD_MASK 0x196c
-#define SI_DC_GPIO_HPD_A 0x196d
-#define SI_DC_GPIO_HPD_EN 0x196e
-#define SI_DC_GPIO_HPD_Y 0x196f
-
-#define SI_GRPH_CONTROL 0x1a01
-# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
-# define SI_GRPH_DEPTH_8BPP 0
-# define SI_GRPH_DEPTH_16BPP 1
-# define SI_GRPH_DEPTH_32BPP 2
-# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-# define SI_ADDR_SURF_2_BANK 0
-# define SI_ADDR_SURF_4_BANK 1
-# define SI_ADDR_SURF_8_BANK 2
-# define SI_ADDR_SURF_16_BANK 3
-# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
-# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-# define SI_ADDR_SURF_BANK_WIDTH_1 0
-# define SI_ADDR_SURF_BANK_WIDTH_2 1
-# define SI_ADDR_SURF_BANK_WIDTH_4 2
-# define SI_ADDR_SURF_BANK_WIDTH_8 3
-# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
-/* 8 BPP */
-# define SI_GRPH_FORMAT_INDEXED 0
-/* 16 BPP */
-# define SI_GRPH_FORMAT_ARGB1555 0
-# define SI_GRPH_FORMAT_ARGB565 1
-# define SI_GRPH_FORMAT_ARGB4444 2
-# define SI_GRPH_FORMAT_AI88 3
-# define SI_GRPH_FORMAT_MONO16 4
-# define SI_GRPH_FORMAT_BGRA5551 5
-/* 32 BPP */
-# define SI_GRPH_FORMAT_ARGB8888 0
-# define SI_GRPH_FORMAT_ARGB2101010 1
-# define SI_GRPH_FORMAT_32BPP_DIG 2
-# define SI_GRPH_FORMAT_8B_ARGB2101010 3
-# define SI_GRPH_FORMAT_BGRA1010102 4
-# define SI_GRPH_FORMAT_8B_BGRA1010102 5
-# define SI_GRPH_FORMAT_RGB111110 6
-# define SI_GRPH_FORMAT_BGR101111 7
-# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-# define SI_ADDR_SURF_BANK_HEIGHT_1 0
-# define SI_ADDR_SURF_BANK_HEIGHT_2 1
-# define SI_ADDR_SURF_BANK_HEIGHT_4 2
-# define SI_ADDR_SURF_BANK_HEIGHT_8 3
-# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-# define SI_ADDR_SURF_TILE_SPLIT_64B 0
-# define SI_ADDR_SURF_TILE_SPLIT_128B 1
-# define SI_ADDR_SURF_TILE_SPLIT_256B 2
-# define SI_ADDR_SURF_TILE_SPLIT_512B 3
-# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
-# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
-# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
-# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
-# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
-# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
-# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
-# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
-# define SI_ADDR_SURF_P2 0
-# define SI_ADDR_SURF_P4_8x16 4
-# define SI_ADDR_SURF_P4_16x16 5
-# define SI_ADDR_SURF_P4_16x32 6
-# define SI_ADDR_SURF_P4_32x32 7
-# define SI_ADDR_SURF_P8_16x16_8x16 8
-# define SI_ADDR_SURF_P8_16x32_8x16 9
-# define SI_ADDR_SURF_P8_32x32_8x16 10
-# define SI_ADDR_SURF_P8_16x32_16x16 11
-# define SI_ADDR_SURF_P8_32x32_16x16 12
-# define SI_ADDR_SURF_P8_32x32_16x32 13
-# define SI_ADDR_SURF_P8_32x64_32x32 14
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
index f9fd2ea4625b..dbc2e723f659 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
@@ -1310,5 +1310,6 @@
#define ixROM_SW_DATA_62 0xc060012c
#define ixROM_SW_DATA_63 0xc0600130
#define ixROM_SW_DATA_64 0xc0600134
+#define ixCURRENT_PG_STATUS 0xc020029c
#endif /* SMU_7_0_1_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 25882a4dea5d..34c6ff52710e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -5452,5 +5452,7 @@
#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
+#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
#endif /* SMU_7_0_1_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
index a9ef1562f43b..66597c64f525 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
@@ -1121,5 +1121,6 @@
#define ixROM_SW_DATA_62 0xc060011c
#define ixROM_SW_DATA_63 0xc0600120
#define ixROM_SW_DATA_64 0xc0600124
+#define ixCURRENT_PG_STATUS 0xc020029c
#endif /* SMU_7_1_1_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h
index 2c997f7b5d13..fb06f2e2f6e6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h
@@ -4860,5 +4860,7 @@
#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
+#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
#endif /* SMU_7_1_1_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index 22dd4c2b7290..4446d43d2a8f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -1271,5 +1271,6 @@
#define ixROM_SW_DATA_62 0xc060011c
#define ixROM_SW_DATA_63 0xc0600120
#define ixROM_SW_DATA_64 0xc0600124
+#define ixCURRENT_PG_STATUS 0xc020029c
#endif /* SMU_7_1_2_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
index 518fd02e9d35..627906674fe8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
@@ -5830,5 +5830,7 @@
#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
+#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
#endif /* SMU_7_1_2_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index eca2b851f25f..0333d880bc9e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -1244,5 +1244,5 @@
#define ixGC_CAC_ACC_CU14 0xc8
#define ixGC_CAC_ACC_CU15 0xc9
#define ixGC_CAC_OVRD_CU 0xe7
-
+#define ixCURRENT_PG_STATUS 0xc020029c
#endif /* SMU_7_1_3_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
index 1ede9e274714..654c1093d362 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -6076,5 +6076,8 @@
#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000
#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
+#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
+#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
+
#endif /* SMU_7_1_3_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index e4a1697ec1d3..17b9d41f3e87 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -171,6 +171,7 @@ struct cgs_firmware_info {
uint32_t ucode_start_address;
void *kptr;
+ bool is_kicker;
};
struct cgs_mode_info {
@@ -622,6 +623,8 @@ typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
+typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_gpu_mem_info_t gpu_mem_info;
@@ -674,6 +677,7 @@ struct cgs_ops {
/* get system info */
cgs_query_system_info query_system_info;
cgs_is_virtualization_enabled_t is_virtualization_enabled;
+ cgs_enter_safe_mode enter_safe_mode;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -779,4 +783,8 @@ struct cgs_device
#define cgs_is_virtualization_enabled(cgs_device) \
CGS_CALL(is_virtualization_enabled, cgs_device)
+
+#define cgs_enter_safe_mode(cgs_device, en) \
+ CGS_CALL(enter_safe_mode, cgs_device, en)
+
#endif /* _CGS_COMMON_H */
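Usage follows the other CGS wrappers: bracket the critical register programming with enter/exit calls. A sketch, assuming the caller holds a valid cgs_device:

	static void example_reprogram_under_safe_mode(struct cgs_device *cgs_device)
	{
		cgs_enter_safe_mode(cgs_device, true);
		/* ... touch clock-gating registers while the GPU is quiesced ... */
		cgs_enter_safe_mode(cgs_device, false);
	}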
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index c81cf1412728..429f18b99323 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
@@ -29,153 +30,154 @@
#include "pp_instance.h"
#include "power_state.h"
#include "eventmanager.h"
-#include "pp_debug.h"
-#define PP_CHECK(handle) \
- do { \
- if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \
- return -EINVAL; \
- } while (0)
+static inline int pp_check(struct pp_instance *handle)
+{
+ if (handle == NULL || handle->pp_valid != PP_VALID)
+ return -EINVAL;
-#define PP_CHECK_HW(hwmgr) \
- do { \
- if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL) \
- return 0; \
- } while (0)
+ if (handle->smu_mgr == NULL || handle->smu_mgr->smumgr_funcs == NULL)
+ return -EINVAL;
+
+ if (handle->pm_en == 0)
+ return PP_DPM_DISABLED;
+
+ if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL
+ || handle->eventmgr == NULL)
+ return PP_DPM_DISABLED;
+
+ return 0;
+}
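+/* pp_check() now distinguishes a hard error from "powerplay present but
+ * dpm disabled"; a sketch of the calling convention the rest of this
+ * file adopts (the wrapper name is hypothetical):
+ *
+ *	static int example_pp_op(void *handle)
+ *	{
+ *		struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ *		int ret = pp_check(pp_handle);
+ *
+ *		if (ret == PP_DPM_DISABLED)
+ *			return 0;	// SMU is usable, but dpm is off
+ *		if (ret != 0)
+ *			return ret;	// bad handle or missing callbacks
+ *
+ *		// safe to dereference pp_handle->hwmgr here
+ *		return 0;
+ *	}
+ */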
static int pp_early_init(void *handle)
{
+ int ret;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+
+ ret = smum_early_init(pp_handle);
+ if (ret)
+ return ret;
+
+ if ((pp_handle->pm_en == 0)
+ || cgs_is_virtualization_enabled(pp_handle->device))
+ return PP_DPM_DISABLED;
+
+ ret = hwmgr_early_init(pp_handle);
+ if (ret) {
+ pp_handle->pm_en = 0;
+ return PP_DPM_DISABLED;
+ }
+
+ ret = eventmgr_early_init(pp_handle);
+ if (ret) {
+ kfree(pp_handle->hwmgr);
+ pp_handle->hwmgr = NULL;
+ pp_handle->pm_en = 0;
+ return PP_DPM_DISABLED;
+ }
+
return 0;
}
static int pp_sw_init(void *handle)
{
- struct pp_instance *pp_handle;
- struct pp_hwmgr *hwmgr;
+ struct pp_smumgr *smumgr;
int ret = 0;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
- if (handle == NULL)
- return -EINVAL;
-
- pp_handle = (struct pp_instance *)handle;
- hwmgr = pp_handle->hwmgr;
-
- PP_CHECK_HW(hwmgr);
-
- if (hwmgr->pptable_func == NULL ||
- hwmgr->pptable_func->pptable_init == NULL ||
- hwmgr->hwmgr_func->backend_init == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- ret = hwmgr->pptable_func->pptable_init(hwmgr);
- if (ret)
- goto err;
+ if (ret == 0 || ret == PP_DPM_DISABLED) {
+ smumgr = pp_handle->smu_mgr;
- ret = hwmgr->hwmgr_func->backend_init(hwmgr);
- if (ret)
- goto err1;
+ if (smumgr->smumgr_funcs->smu_init == NULL)
+ return -EINVAL;
- pr_info("amdgpu: powerplay initialized\n");
+ ret = smumgr->smumgr_funcs->smu_init(smumgr);
- return 0;
-err1:
- if (hwmgr->pptable_func->pptable_fini)
- hwmgr->pptable_func->pptable_fini(hwmgr);
-err:
- pr_err("amdgpu: powerplay initialization failed\n");
+ pr_info("amdgpu: powerplay sw initialized\n");
+ }
return ret;
}
static int pp_sw_fini(void *handle)
{
- struct pp_instance *pp_handle;
- struct pp_hwmgr *hwmgr;
+ struct pp_smumgr *smumgr;
int ret = 0;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
+ if (ret == 0 || ret == PP_DPM_DISABLED) {
+ smumgr = pp_handle->smu_mgr;
- pp_handle = (struct pp_instance *)handle;
- hwmgr = pp_handle->hwmgr;
-
- PP_CHECK_HW(hwmgr);
-
- if (hwmgr->hwmgr_func->backend_fini != NULL)
- ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
-
- if (hwmgr->pptable_func->pptable_fini)
- hwmgr->pptable_func->pptable_fini(hwmgr);
+ if (smumgr->smumgr_funcs->smu_fini == NULL)
+ return -EINVAL;
+ ret = smumgr->smumgr_funcs->smu_fini(smumgr);
+ }
return ret;
}
static int pp_hw_init(void *handle)
{
- struct pp_instance *pp_handle;
struct pp_smumgr *smumgr;
struct pp_eventmgr *eventmgr;
- struct pp_hwmgr *hwmgr;
int ret = 0;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- pp_handle = (struct pp_instance *)handle;
- smumgr = pp_handle->smu_mgr;
- hwmgr = pp_handle->hwmgr;
+ if (ret == 0 || ret == PP_DPM_DISABLED) {
+ smumgr = pp_handle->smu_mgr;
- if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
- smumgr->smumgr_funcs->smu_init == NULL ||
- smumgr->smumgr_funcs->start_smu == NULL)
- return -EINVAL;
-
- ret = smumgr->smumgr_funcs->smu_init(smumgr);
- if (ret) {
- printk(KERN_ERR "[ powerplay ] smc initialization failed\n");
- return ret;
- }
+ if (smumgr->smumgr_funcs->start_smu == NULL)
+ return -EINVAL;
- ret = smumgr->smumgr_funcs->start_smu(smumgr);
- if (ret) {
- printk(KERN_ERR "[ powerplay ] smc start failed\n");
- smumgr->smumgr_funcs->smu_fini(smumgr);
- return ret;
+ if (smumgr->smumgr_funcs->start_smu(smumgr)) {
+ pr_err("smc start failed\n");
+ smumgr->smumgr_funcs->smu_fini(smumgr);
+ return -EINVAL;
+ }
+ if (ret == PP_DPM_DISABLED)
+ return PP_DPM_DISABLED;
}
- PP_CHECK_HW(hwmgr);
-
- hw_init_power_state_table(hwmgr);
+ ret = hwmgr_hw_init(pp_handle);
+ if (ret)
+ goto err;
eventmgr = pp_handle->eventmgr;
- if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
- return -EINVAL;
+ if (eventmgr->pp_eventmgr_init == NULL ||
+ eventmgr->pp_eventmgr_init(eventmgr))
+ goto err;
- ret = eventmgr->pp_eventmgr_init(eventmgr);
return 0;
+err:
+ pp_handle->pm_en = 0;
+ kfree(pp_handle->eventmgr);
+ kfree(pp_handle->hwmgr);
+ pp_handle->hwmgr = NULL;
+ pp_handle->eventmgr = NULL;
+ return PP_DPM_DISABLED;
}
static int pp_hw_fini(void *handle)
{
- struct pp_instance *pp_handle;
- struct pp_smumgr *smumgr;
struct pp_eventmgr *eventmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
-
- pp_handle = (struct pp_instance *)handle;
- eventmgr = pp_handle->eventmgr;
+ ret = pp_check(pp_handle);
- if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL)
- eventmgr->pp_eventmgr_fini(eventmgr);
+ if (ret == 0) {
+ eventmgr = pp_handle->eventmgr;
- smumgr = pp_handle->smu_mgr;
-
- if (smumgr != NULL && smumgr->smumgr_funcs != NULL &&
- smumgr->smumgr_funcs->smu_fini != NULL)
- smumgr->smumgr_funcs->smu_fini(smumgr);
+ if (eventmgr->pp_eventmgr_fini != NULL)
+ eventmgr->pp_eventmgr_fini(eventmgr);
+ hwmgr_hw_fini(pp_handle);
+ }
return 0;
}
@@ -198,16 +200,18 @@ static int pp_sw_reset(void *handle)
int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -218,16 +222,18 @@ static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -238,49 +244,53 @@ static int pp_set_powergating_state(void *handle,
static int pp_suspend(void *handle)
{
- struct pp_instance *pp_handle;
struct pp_eventmgr *eventmgr;
struct pem_event_data event_data = { {0} };
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
+
+ if (ret != 0)
+ return ret;
- pp_handle = (struct pp_instance *)handle;
eventmgr = pp_handle->eventmgr;
+ pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
- if (eventmgr != NULL)
- pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
return 0;
}
static int pp_resume(void *handle)
{
- struct pp_instance *pp_handle;
struct pp_eventmgr *eventmgr;
struct pem_event_data event_data = { {0} };
struct pp_smumgr *smumgr;
- int ret;
+ int ret, ret1;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
- if (handle == NULL)
- return -EINVAL;
+ ret1 = pp_check(pp_handle);
+
+ if (ret1 != 0 && ret1 != PP_DPM_DISABLED)
+ return ret1;
- pp_handle = (struct pp_instance *)handle;
smumgr = pp_handle->smu_mgr;
- if (smumgr == NULL || smumgr->smumgr_funcs == NULL ||
- smumgr->smumgr_funcs->start_smu == NULL)
+ if (smumgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
ret = smumgr->smumgr_funcs->start_smu(smumgr);
if (ret) {
- printk(KERN_ERR "[ powerplay ] smc start failed\n");
+ pr_err("smc start failed\n");
smumgr->smumgr_funcs->smu_fini(smumgr);
return ret;
}
+ if (ret1 == PP_DPM_DISABLED)
+ return ret1;
+
eventmgr = pp_handle->eventmgr;
- if (eventmgr != NULL)
- pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
+
+ pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
return 0;
}
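The resume path is the one place where PP_DPM_DISABLED is not an early-out: the SMU is restarted unconditionally, and only the eventmgr resume is skipped. Distilled from the hunk above:

	/* ret1: instance state from pp_check(); ret: SMU start result.
	 * Even with dpm disabled the SMU must be brought back up. */
	ret1 = pp_check(pp_handle);
	if (ret1 != 0 && ret1 != PP_DPM_DISABLED)
		return ret1;			/* hard error */
	ret = smumgr->smumgr_funcs->start_smu(smumgr);
	if (ret)
		return ret;			/* SMU failed to start */
	if (ret1 == PP_DPM_DISABLED)
		return ret1;			/* dpm off: skip eventmgr resume */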
@@ -315,20 +325,19 @@ static int pp_dpm_fw_loading_complete(void *handle)
static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
- struct pp_instance *pp_handle;
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- pp_handle = (struct pp_instance *)handle;
+ if (ret != 0)
+ return ret;
hwmgr = pp_handle->hwmgr;
- PP_CHECK_HW(hwmgr);
-
if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -341,30 +350,34 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
- return (((struct pp_instance *)handle)->hwmgr->dpm_level);
+ return hwmgr->dpm_level;
}
static int pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->get_sclk == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -374,16 +387,18 @@ static int pp_dpm_get_sclk(void *handle, bool low)
static int pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->get_mclk == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -393,16 +408,18 @@ static int pp_dpm_get_mclk(void *handle, bool low)
static int pp_dpm_powergate_vce(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -412,16 +429,18 @@ static int pp_dpm_powergate_vce(void *handle, bool gate)
static int pp_dpm_powergate_uvd(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -446,16 +465,13 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
void *input, void *output)
{
int ret = 0;
- struct pp_instance *pp_handle;
struct pem_event_data data = { {0} };
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
- pp_handle = (struct pp_instance *)handle;
+ ret = pp_check(pp_handle);
- if (pp_handle == NULL)
- return -EINVAL;
-
- if (pp_handle->eventmgr == NULL)
- return 0;
+ if (ret != 0)
+ return ret;
switch (event_id) {
case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
@@ -489,13 +505,17 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_power_state *state;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- if (hwmgr == NULL || hwmgr->current_ps == NULL)
+ hwmgr = pp_handle->hwmgr;
+
+ if (hwmgr->current_ps == NULL)
return -EINVAL;
state = hwmgr->current_ps;
@@ -518,16 +538,18 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -537,16 +559,18 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
static int pp_dpm_get_fan_control_mode(void *handle)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -556,16 +580,18 @@ static int pp_dpm_get_fan_control_mode(void *handle)
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -575,16 +601,18 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -594,13 +622,15 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
return -EINVAL;
@@ -611,16 +641,18 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
static int pp_dpm_get_temperature(void *handle)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->get_temperature == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -632,13 +664,17 @@ static int pp_dpm_get_pp_num_states(void *handle,
{
struct pp_hwmgr *hwmgr;
int i;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
+
+ hwmgr = pp_handle->hwmgr;
- if (hwmgr == NULL || hwmgr->ps == NULL)
+ if (hwmgr->ps == NULL)
return -EINVAL;
data->nums = hwmgr->num_ps;
@@ -670,13 +706,15 @@ static int pp_dpm_get_pp_num_states(void *handle,
static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (!hwmgr->soft_pp_table)
return -EINVAL;
@@ -689,13 +727,15 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
@@ -717,16 +757,18 @@ static int pp_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -737,16 +779,18 @@ static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
@@ -755,16 +799,18 @@ static int pp_dpm_print_clock_levels(void *handle,
static int pp_dpm_get_sclk_od(void *handle)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -774,16 +820,18 @@ static int pp_dpm_get_sclk_od(void *handle)
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -793,16 +841,18 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
static int pp_dpm_get_mclk_od(void *handle)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -812,16 +862,18 @@ static int pp_dpm_get_mclk_od(void *handle)
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -831,16 +883,18 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (!handle)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->read_sensor == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
+ pr_info("%s was not implemented.\n", __func__);
return 0;
}
@@ -851,13 +905,18 @@ static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (handle) {
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ ret = pp_check(pp_handle);
- if (hwmgr && idx < hwmgr->num_vce_state_tables)
- return &hwmgr->vce_states[idx];
- }
+ if (ret != 0)
+ return NULL;
+
+ hwmgr = pp_handle->hwmgr;
+
+ if (hwmgr && idx < hwmgr->num_vce_state_tables)
+ return &hwmgr->vce_states[idx];
return NULL;
}
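Because this getter returns a pointer, a failed pp_check() is mapped to NULL rather than a negative errno; a caller is presumably expected to treat NULL as "no state at this index". Hypothetical caller sketch:

	/* Hypothetical caller: iterate until the VCE state table runs out. */
	unsigned int i;
	for (i = 0; ; i++) {
		struct amd_vce_state *vs = pp_dpm_get_vce_clock_state(handle, i);
		if (vs == NULL)
			break;
		consume_vce_state(vs);	/* hypothetical consumer */
	}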
@@ -892,89 +951,44 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_vce_clock_state = pp_dpm_get_vce_clock_state,
};
-static int amd_pp_instance_init(struct amd_pp_init *pp_init,
- struct amd_powerplay *amd_pp)
+int amd_powerplay_create(struct amd_pp_init *pp_init,
+ void **handle)
{
- int ret;
- struct pp_instance *handle;
-
- handle = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
- if (handle == NULL)
- return -ENOMEM;
-
- handle->pp_valid = PP_VALID;
-
- ret = smum_init(pp_init, handle);
- if (ret)
- goto fail_smum;
-
-
- amd_pp->pp_handle = handle;
+ struct pp_instance *instance;
- if ((amdgpu_dpm == 0)
- || cgs_is_virtualization_enabled(pp_init->device))
- return 0;
+ if (pp_init == NULL || handle == NULL)
+ return -EINVAL;
- ret = hwmgr_init(pp_init, handle);
- if (ret)
- goto fail_hwmgr;
+ instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
+ if (instance == NULL)
+ return -ENOMEM;
- ret = eventmgr_init(handle);
- if (ret)
- goto fail_eventmgr;
+ instance->pp_valid = PP_VALID;
+ instance->chip_family = pp_init->chip_family;
+ instance->chip_id = pp_init->chip_id;
+ instance->pm_en = pp_init->pm_en;
+ instance->feature_mask = pp_init->feature_mask;
+ instance->device = pp_init->device;
+ *handle = instance;
return 0;
-
-fail_eventmgr:
- hwmgr_fini(handle->hwmgr);
-fail_hwmgr:
- smum_fini(handle->smu_mgr);
-fail_smum:
- kfree(handle);
- return ret;
}
-static int amd_pp_instance_fini(void *handle)
+int amd_powerplay_destroy(void *handle)
{
struct pp_instance *instance = (struct pp_instance *)handle;
- if (instance == NULL)
- return -EINVAL;
-
- if ((amdgpu_dpm != 0)
- && !cgs_is_virtualization_enabled(instance->smu_mgr->device)) {
- eventmgr_fini(instance->eventmgr);
- hwmgr_fini(instance->hwmgr);
+ if (instance->pm_en) {
+ kfree(instance->eventmgr);
+ kfree(instance->hwmgr);
+ instance->hwmgr = NULL;
+ instance->eventmgr = NULL;
}
- smum_fini(instance->smu_mgr);
- kfree(handle);
- return 0;
-}
-
-int amd_powerplay_init(struct amd_pp_init *pp_init,
- struct amd_powerplay *amd_pp)
-{
- int ret;
-
- if (pp_init == NULL || amd_pp == NULL)
- return -EINVAL;
-
- ret = amd_pp_instance_init(pp_init, amd_pp);
-
- if (ret)
- return ret;
-
- amd_pp->ip_funcs = &pp_ip_funcs;
- amd_pp->pp_funcs = &pp_dpm_funcs;
-
- return 0;
-}
-
-int amd_powerplay_fini(void *handle)
-{
- amd_pp_instance_fini(handle);
-
+ kfree(instance->smu_mgr);
+ instance->smu_mgr = NULL;
+ kfree(instance);
+ instance = NULL;
return 0;
}
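The old init/fini pair becomes a create/destroy pair that only allocates and frees the instance; hardware bring-up moves into the hw_init hooks. Assumed usage from the amdgpu pp IP block (sketch; field values are illustrative, not from this patch):

	struct amd_pp_init pp_init = {
		.device       = cgs_device,		/* illustrative values */
		.chip_family  = adev_family,
		.chip_id      = adev_chip_id,
		.pm_en        = (amdgpu_dpm != 0),
		.feature_mask = amdgpu_pp_feature_mask,
	};
	void *pp_handle;
	int ret = amd_powerplay_create(&pp_init, &pp_handle);
	if (ret)
		return ret;
	/* ... sw/hw init, runtime operation, sw/hw fini ... */
	amd_powerplay_destroy(pp_handle);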
@@ -985,33 +999,25 @@ int amd_powerplay_reset(void *handle)
struct pem_event_data event_data = { {0} };
int ret;
- if (instance == NULL)
- return -EINVAL;
-
- eventmgr = instance->eventmgr;
- if (!eventmgr || !eventmgr->pp_eventmgr_fini)
- return -EINVAL;
-
- eventmgr->pp_eventmgr_fini(eventmgr);
+ if (cgs_is_virtualization_enabled(instance->smu_mgr->device))
+ return PP_DPM_DISABLED;
- ret = pp_sw_fini(handle);
- if (ret)
+ ret = pp_check(instance);
+ if (ret != 0)
return ret;
- kfree(instance->hwmgr->ps);
-
- ret = pp_sw_init(handle);
+ ret = pp_hw_fini(handle);
if (ret)
return ret;
- if ((amdgpu_dpm == 0)
- || cgs_is_virtualization_enabled(instance->smu_mgr->device))
- return 0;
+ ret = hwmgr_hw_init(instance);
+ if (ret)
+ return PP_DPM_DISABLED;
- hw_init_power_state_table(instance->hwmgr);
+ eventmgr = instance->eventmgr;
- if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
- return -EINVAL;
+ if (eventmgr->pp_eventmgr_init == NULL)
+ return PP_DPM_DISABLED;
ret = eventmgr->pp_eventmgr_init(eventmgr);
if (ret)
@@ -1026,12 +1032,15 @@ int amd_powerplay_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *display_config)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- PP_CHECK((struct pp_instance *)handle);
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
phm_store_dal_configuration_data(hwmgr, display_config);
@@ -1042,15 +1051,18 @@ int amd_powerplay_get_display_power_level(void *handle,
struct amd_pp_simple_clock_info *output)
{
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- PP_CHECK((struct pp_instance *)handle);
+ ret = pp_check(pp_handle);
- if (output == NULL)
- return -EINVAL;
+ if (ret != 0)
+ return ret;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ hwmgr = pp_handle->hwmgr;
- PP_CHECK_HW(hwmgr);
+ if (output == NULL)
+ return -EINVAL;
return phm_get_dal_power_level(hwmgr, output);
}
@@ -1058,18 +1070,18 @@ int amd_powerplay_get_display_power_level(void *handle,
int amd_powerplay_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
{
- struct pp_hwmgr *hwmgr;
struct amd_pp_simple_clock_info simple_clocks;
struct pp_clock_info hw_clocks;
+ struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- PP_CHECK((struct pp_instance *)handle);
-
- if (clocks == NULL)
- return -EINVAL;
+ ret = pp_check(pp_handle);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ if (ret != 0)
+ return ret;
- PP_CHECK_HW(hwmgr);
+ hwmgr = pp_handle->hwmgr;
phm_get_dal_power_level(hwmgr, &simple_clocks);
@@ -1105,18 +1117,20 @@ int amd_powerplay_get_current_clocks(void *handle,
int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
int result = -1;
+ struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- struct pp_hwmgr *hwmgr;
+ ret = pp_check(pp_handle);
- PP_CHECK((struct pp_instance *)handle);
+ if (ret != 0)
+ return ret;
+
+ hwmgr = pp_handle->hwmgr;
if (clocks == NULL)
return -EINVAL;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- PP_CHECK_HW(hwmgr);
-
result = phm_get_clock_by_type(hwmgr, type, clocks);
return result;
@@ -1125,21 +1139,24 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s
int amd_powerplay_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *clocks)
{
- int result = -1;
struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- PP_CHECK((struct pp_instance *)handle);
+ ret = pp_check(pp_handle);
- if (clocks == NULL)
- return -EINVAL;
+ if (ret != 0)
+ return ret;
+
+ hwmgr = pp_handle->hwmgr;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
- PP_CHECK_HW(hwmgr);
+ if (clocks == NULL)
+ return -EINVAL;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
- result = phm_get_max_high_clocks(hwmgr, clocks);
+ ret = phm_get_max_high_clocks(hwmgr, clocks);
- return result;
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
index d5ec8ccbe97d..a3cd230d636d 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
@@ -151,7 +151,7 @@ static int thermal_interrupt_callback(void *private_data,
unsigned src_id, const uint32_t *iv_entry)
{
/* TODO: handle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data */
- printk("current thermal is out of range \n");
+ pr_info("current thermal is out of range \n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index fb88e4e5d625..781e53dcf128 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -60,9 +60,8 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
}
-int eventmgr_init(struct pp_instance *handle)
+int eventmgr_early_init(struct pp_instance *handle)
{
- int result = 0;
struct pp_eventmgr *eventmgr;
if (handle == NULL)
@@ -79,12 +78,6 @@ int eventmgr_init(struct pp_instance *handle)
eventmgr->pp_eventmgr_init = pem_init;
eventmgr->pp_eventmgr_fini = pem_fini;
- return result;
-}
-
-int eventmgr_fini(struct pp_eventmgr *eventmgr)
-{
- kfree(eventmgr);
return 0;
}
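eventmgr_init/eventmgr_fini become eventmgr_early_init, mirroring hwmgr_early_init below: the early stage only allocates the manager and wires up function pointers, while pem_init/pem_fini run later from the hw paths. The assumed driver-load ordering after this refactor (sketch):

	/* Assumed ordering (sketch): */
	amd_powerplay_create(&pp_init, &handle);  /* allocate pp_instance       */
	hwmgr_early_init(handle);                 /* alloc hwmgr, function ptrs */
	eventmgr_early_init(handle);              /* alloc eventmgr, func ptrs  */
	/* ... later, from the amdgpu ip_funcs hw_init hook ... */
	hwmgr_hw_init(handle);                    /* pptable, backend, PS table */
	eventmgr->pp_eventmgr_init(eventmgr);     /* == pem_init                */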
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
index ec36c0e28388..e04216ec7ee1 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
@@ -38,10 +38,13 @@
int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
+ if (eventmgr == NULL || eventmgr->hwmgr == NULL)
+ return -EINVAL;
+
if (pem_is_hw_access_blocked(eventmgr))
return 0;
- phm_force_dpm_levels(eventmgr->hwmgr, AMD_DPM_FORCED_LEVEL_AUTO);
+ phm_force_dpm_levels(eventmgr->hwmgr, eventmgr->hwmgr->dpm_level);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 6bb79c94cb9f..b33935fcf428 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- if (cz_hwmgr->uvd_power_gated == bgate)
- return 0;
-
cz_hwmgr->uvd_power_gated = bgate;
if (bgate) {
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
cz_dpm_update_uvd_dpm(hwmgr, true);
cz_dpm_powerdown_uvd(hwmgr);
} else {
cz_dpm_powerup_uvd(hwmgr);
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}
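The reordering makes gate and ungate symmetric: power-gate before clock-gate on the way down, clock-ungate before power-ungate on the way up. Distilled to a sketch:

	/* Ordering this hunk establishes for an IP block (sketch). */
	if (bgate) {
		cgs_set_powergating_state(dev, block, AMD_PG_STATE_GATE);
		cgs_set_clockgating_state(dev, block, AMD_CG_STATE_GATE);
	} else {
		cgs_set_clockgating_state(dev, block, AMD_CG_STATE_UNGATE);
		cgs_set_powergating_state(dev, block, AMD_PG_STATE_UNGATE);
	}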
@@ -193,57 +190,50 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating)) {
- if (cz_hwmgr->vce_power_gated != bgate) {
- if (bgate) {
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- cz_enable_disable_vce_dpm(hwmgr, false);
- cz_dpm_powerdown_vce(hwmgr);
- cz_hwmgr->vce_power_gated = true;
- } else {
- cz_dpm_powerup_vce(hwmgr);
- cz_hwmgr->vce_power_gated = false;
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- cz_dpm_update_vce_dpm(hwmgr);
- cz_enable_disable_vce_dpm(hwmgr, true);
- return 0;
- }
- }
+ if (bgate) {
+ cgs_set_powergating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ cgs_set_clockgating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ cz_enable_disable_vce_dpm(hwmgr, false);
+ cz_dpm_powerdown_vce(hwmgr);
+ cz_hwmgr->vce_power_gated = true;
} else {
- cz_hwmgr->vce_power_gated = bgate;
+ cz_dpm_powerup_vce(hwmgr);
+ cz_hwmgr->vce_power_gated = false;
+ cgs_set_clockgating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
- cz_enable_disable_vce_dpm(hwmgr, !bgate);
+ cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
}
- if (!cz_hwmgr->vce_power_gated)
- cz_dpm_update_vce_dpm(hwmgr);
-
return 0;
}
static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
/* we don't need an exit table here, because there is only D3 cold on KV */
- { phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize },
- { phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize },
+ {
+ .isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating,
+ .tableFunction = cz_tf_uvd_power_gating_initialize
+ },
+ {
+ .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating,
+ .tableFunction = cz_tf_vce_power_gating_initialize
+ },
/* to do { NULL, cz_tf_xdma_power_gating_enable }, */
- { NULL, NULL }
+ { }
};
const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
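Positional initializers for phm_master_table_item give way to designated ones, and the explicit { NULL, NULL } terminator becomes an empty { } (zero-initialized, so equivalent). The pattern, with hypothetical names:

	static const struct phm_master_table_item example_list[] = {
		{
			.isFunctionNeededInRuntimeTable = want_feature,	/* optional gate */
			.tableFunction = enable_feature,		/* hypothetical */
		},
		{ .tableFunction = always_run },	/* no gate: always included */
		{ }	/* zero sentinel, same as { NULL, NULL } */
	};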
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 0fb4e8c8f5e1..a4cde3d778b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -20,13 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
-#include "pp_debug.h"
#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu8_fusion.h"
@@ -38,7 +38,6 @@
#include "cz_hwmgr.h"
#include "power_state.h"
#include "cz_clockpowergating.h"
-#include "pp_debug.h"
#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
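Moving #include "pp_debug.h" above the system headers only makes sense if it defines pr_fmt, which must be seen before linux/printk.h is pulled in; that would also explain why the "[ powerplay ]" prefixes can be dropped from the messages themselves. A plausible sketch of the relevant part of pp_debug.h (assumption, not shown in this patch):

	/* pp_debug.h, sketch: defines the prefix every pr_err()/pr_info() gets. */
	#define pr_fmt(fmt) "amdgpu: [powerplay] " fmt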
@@ -288,7 +287,7 @@ static int cz_init_dynamic_state_adjustment_rule_settings(
kzalloc(table_size, GFP_KERNEL);
if (NULL == table_clk_vlt) {
- printk(KERN_ERR "[ powerplay ] Can not allocate memory!\n");
+ pr_err("Can not allocate memory!\n");
return -ENOMEM;
}
@@ -329,12 +328,12 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
&size, &frev, &crev);
if (crev != 9) {
- printk(KERN_ERR "[ powerplay ] Unsupported IGP table: %d %d\n", frev, crev);
+ pr_err("Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
if (info == NULL) {
- printk(KERN_ERR "[ powerplay ] Could not retrieve the Integrated System Info Table!\n");
+ pr_err("Could not retrieve the Integrated System Info Table!\n");
return -EINVAL;
}
@@ -361,7 +360,7 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
if (cz_hwmgr->sys_info.htc_tmp_lmt <=
cz_hwmgr->sys_info.htc_hyst_lmt) {
- printk(KERN_ERR "[ powerplay ] The htcTmpLmt should be larger than htcHystLmt.\n");
+ pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
return -EINVAL;
}
@@ -723,7 +722,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
clock = hwmgr->display_config.min_core_set_clock;
if (clock == 0)
- printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
+ pr_info("min_core_set_clock not set\n");
if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
@@ -888,13 +887,13 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
}
static const struct phm_master_table_item cz_set_power_state_list[] = {
- {NULL, cz_tf_update_sclk_limit},
- {NULL, cz_tf_set_deep_sleep_sclk_threshold},
- {NULL, cz_tf_set_watermark_threshold},
- {NULL, cz_tf_set_enabled_levels},
- {NULL, cz_tf_enable_nb_dpm},
- {NULL, cz_tf_update_low_mem_pstate},
- {NULL, NULL}
+ { .tableFunction = cz_tf_update_sclk_limit },
+ { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold },
+ { .tableFunction = cz_tf_set_watermark_threshold },
+ { .tableFunction = cz_tf_set_enabled_levels },
+ { .tableFunction = cz_tf_enable_nb_dpm },
+ { .tableFunction = cz_tf_update_low_mem_pstate },
+ { }
};
static const struct phm_master_table_header cz_set_power_state_master = {
@@ -904,15 +903,15 @@ static const struct phm_master_table_header cz_set_power_state_master = {
};
static const struct phm_master_table_item cz_setup_asic_list[] = {
- {NULL, cz_tf_reset_active_process_mask},
- {NULL, cz_tf_upload_pptable_to_smu},
- {NULL, cz_tf_init_sclk_limit},
- {NULL, cz_tf_init_uvd_limit},
- {NULL, cz_tf_init_vce_limit},
- {NULL, cz_tf_init_acp_limit},
- {NULL, cz_tf_init_power_gate_state},
- {NULL, cz_tf_init_sclk_threshold},
- {NULL, NULL}
+ { .tableFunction = cz_tf_reset_active_process_mask },
+ { .tableFunction = cz_tf_upload_pptable_to_smu },
+ { .tableFunction = cz_tf_init_sclk_limit },
+ { .tableFunction = cz_tf_init_uvd_limit },
+ { .tableFunction = cz_tf_init_vce_limit },
+ { .tableFunction = cz_tf_init_acp_limit },
+ { .tableFunction = cz_tf_init_power_gate_state },
+ { .tableFunction = cz_tf_init_sclk_threshold },
+ { }
};
static const struct phm_master_table_header cz_setup_asic_master = {
@@ -957,10 +956,10 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
}
static const struct phm_master_table_item cz_power_down_asic_list[] = {
- {NULL, cz_tf_power_up_display_clock_sys_pll},
- {NULL, cz_tf_clear_nb_dpm_flag},
- {NULL, cz_tf_reset_cc6_data},
- {NULL, NULL}
+ { .tableFunction = cz_tf_power_up_display_clock_sys_pll },
+ { .tableFunction = cz_tf_clear_nb_dpm_flag },
+ { .tableFunction = cz_tf_reset_cc6_data },
+ { }
};
static const struct phm_master_table_header cz_power_down_asic_master = {
@@ -1068,8 +1067,8 @@ static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
}
static const struct phm_master_table_item cz_disable_dpm_list[] = {
- { NULL, cz_tf_check_for_dpm_enabled},
- {NULL, NULL},
+ { .tableFunction = cz_tf_check_for_dpm_enabled },
+ { },
};
@@ -1080,13 +1079,13 @@ static const struct phm_master_table_header cz_disable_dpm_master = {
};
static const struct phm_master_table_item cz_enable_dpm_list[] = {
- { NULL, cz_tf_check_for_dpm_disabled },
- { NULL, cz_tf_program_voting_clients },
- { NULL, cz_tf_start_dpm},
- { NULL, cz_tf_program_bootup_state},
- { NULL, cz_tf_enable_didt },
- { NULL, cz_tf_reset_acp_boot_level },
- {NULL, NULL},
+ { .tableFunction = cz_tf_check_for_dpm_disabled },
+ { .tableFunction = cz_tf_program_voting_clients },
+ { .tableFunction = cz_tf_start_dpm },
+ { .tableFunction = cz_tf_program_bootup_state },
+ { .tableFunction = cz_tf_enable_didt },
+ { .tableFunction = cz_tf_reset_acp_boot_level },
+ { },
};
static const struct phm_master_table_header cz_enable_dpm_master = {
@@ -1162,13 +1161,13 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = cz_initialize_dpm_defaults(hwmgr);
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] cz_initialize_dpm_defaults failed\n");
+ pr_err("cz_initialize_dpm_defaults failed\n");
return result;
}
result = cz_get_system_info_data(hwmgr);
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] cz_get_system_info_data failed\n");
+ pr_err("cz_get_system_info_data failed\n");
return result;
}
@@ -1177,40 +1176,40 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = phm_construct_table(hwmgr, &cz_setup_asic_master,
&(hwmgr->setup_asic));
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] Fail to construct setup ASIC\n");
+ pr_err("Fail to construct setup ASIC\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_power_down_asic_master,
&(hwmgr->power_down_asic));
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] Fail to construct power down ASIC\n");
+ pr_err("Fail to construct power down ASIC\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_disable_dpm_master,
&(hwmgr->disable_dynamic_state_management));
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] Fail to disable_dynamic_state\n");
+ pr_err("Fail to disable_dynamic_state\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_enable_dpm_master,
&(hwmgr->enable_dynamic_state_management));
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] Fail to enable_dynamic_state\n");
+ pr_err("Fail to enable_dynamic_state\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_set_power_state_master,
&(hwmgr->set_power_state));
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n");
+ pr_err("Fail to construct set_power_state\n");
return result;
}
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] Fail to construct enable_clock_power_gatings\n");
+ pr_err("Fail to construct enable_clock_power_gatings\n");
return result;
}
return result;
@@ -1218,9 +1217,15 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- if (hwmgr != NULL && hwmgr->backend != NULL) {
+ if (hwmgr != NULL) {
+ phm_destroy_table(hwmgr, &(hwmgr->enable_clock_power_gatings));
+ phm_destroy_table(hwmgr, &(hwmgr->set_power_state));
+ phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management));
+ phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management));
+ phm_destroy_table(hwmgr, &(hwmgr->power_down_asic));
+ phm_destroy_table(hwmgr, &(hwmgr->setup_asic));
kfree(hwmgr->backend);
- kfree(hwmgr);
+ hwmgr->backend = NULL;
}
return 0;
}
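backend_fini is now the mirror image of backend_init: it tears down the six phm tables constructed above and frees only the backend, leaving the hwmgr itself to the pp_instance teardown. Summed up (sketch of the assumed ownership):

	/* cz_hwmgr_backend_init(): kzalloc(backend), phm_construct_table() x6
	 * cz_hwmgr_backend_fini(): phm_destroy_table() x6, kfree(backend)
	 * hwmgr itself now belongs to the pp_instance and is freed elsewhere. */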
@@ -1939,7 +1944,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.read_sensor = cz_read_sensor,
};
-int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
+int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &cz_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
index c477f1cf3f23..508b422d6159 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
@@ -316,7 +316,6 @@ struct cz_hwmgr {
struct pp_hwmgr;
-int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 71822ae73a12..bc7d8bd7e7cb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -35,7 +35,7 @@ static int phm_run_table(struct pp_hwmgr *hwmgr,
phm_table_function *function;
if (rt_table->function_list == NULL) {
- pr_debug("[ powerplay ] this function not implement!\n");
+ pr_debug("this function not implement!\n");
return 0;
}
@@ -63,14 +63,14 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
void *temp_storage;
if (hwmgr == NULL || rt_table == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
+ pr_err("Invalid Parameter!\n");
return -EINVAL;
}
if (0 != rt_table->storage_size) {
temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL);
if (temp_storage == NULL) {
- printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n");
+ pr_err("Could not allocate table temporary storage\n");
return -ENOMEM;
}
} else {
@@ -95,7 +95,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr,
phm_table_function *rtf;
if (hwmgr == NULL || master_table == NULL || rt_table == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
+ pr_err("Invalid Parameter!\n");
return -EINVAL;
}
@@ -116,7 +116,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr,
for (table_item = master_table->master_list;
NULL != table_item->tableFunction; table_item++) {
if ((rtf - run_time_list) > function_count) {
- printk(KERN_ERR "[ powerplay ] Check function results have changed\n");
+ pr_err("Check function results have changed\n");
kfree(run_time_list);
return -EINVAL;
}
@@ -128,7 +128,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr,
}
if ((rtf - run_time_list) > function_count) {
- printk(KERN_ERR "[ powerplay ] Check function results have changed\n");
+ pr_err("Check function results have changed\n");
kfree(run_time_list);
return -EINVAL;
}
@@ -144,7 +144,7 @@ int phm_destroy_table(struct pp_hwmgr *hwmgr,
struct phm_runtime_table_header *rt_table)
{
if (hwmgr == NULL || rt_table == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Parameter\n");
+ pr_err("Invalid Parameter\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index c355a0f51663..0eb8e886bf35 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -20,11 +20,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/errno.h>
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"
-#include "pp_debug.h"
#define PHM_FUNC_CHECK(hw) \
do { \
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index b03606405a53..2ea9c0e78689 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -20,6 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
+#include "pp_debug.h"
#include "linux/delay.h"
#include <linux/types.h>
#include <linux/kernel.h>
@@ -29,13 +31,12 @@
#include "power_state.h"
#include "hwmgr.h"
#include "pppcielanes.h"
-#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "pp_acpi.h"
#include "amd_acpi.h"
-extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
@@ -49,11 +50,11 @@ uint8_t convert_to_vid(uint16_t vddc)
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}
-int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
+int hwmgr_early_init(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
- if ((handle == NULL) || (pp_init == NULL))
+ if (handle == NULL)
return -EINVAL;
hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
@@ -62,19 +63,20 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
handle->hwmgr = hwmgr;
hwmgr->smumgr = handle->smu_mgr;
- hwmgr->device = pp_init->device;
- hwmgr->chip_family = pp_init->chip_family;
- hwmgr->chip_id = pp_init->chip_id;
+ hwmgr->device = handle->device;
+ hwmgr->chip_family = handle->chip_family;
+ hwmgr->chip_id = handle->chip_id;
+ hwmgr->feature_mask = handle->feature_mask;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
-
+ hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CZ:
- cz_hwmgr_init(hwmgr);
+ cz_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
@@ -102,7 +104,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
default:
return -EINVAL;
}
- smu7_hwmgr_init(hwmgr);
+ smu7_init_function_pointers(hwmgr);
break;
default:
return -EINVAL;
@@ -111,28 +113,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
return 0;
}
-int hwmgr_fini(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr == NULL || hwmgr->ps == NULL)
- return -EINVAL;
-
- /* do hwmgr finish*/
- kfree(hwmgr->hardcode_pp_table);
-
- kfree(hwmgr->backend);
-
- kfree(hwmgr->start_thermal_controller.function_list);
-
- kfree(hwmgr->set_temperature_range.function_list);
-
- kfree(hwmgr->ps);
- kfree(hwmgr->current_ps);
- kfree(hwmgr->request_ps);
- kfree(hwmgr);
- return 0;
-}
-
-int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
+static int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
{
int result;
unsigned int i;
@@ -156,12 +137,20 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
return -ENOMEM;
hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
- if (hwmgr->request_ps == NULL)
+ if (hwmgr->request_ps == NULL) {
+ kfree(hwmgr->ps);
+ hwmgr->ps = NULL;
return -ENOMEM;
+ }
hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
- if (hwmgr->current_ps == NULL)
+ if (hwmgr->current_ps == NULL) {
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
return -ENOMEM;
+ }
state = hwmgr->ps;
@@ -181,10 +170,77 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
state = (struct pp_power_state *)((unsigned long)state + size);
}
+ return 0;
+}
+static int hw_fini_power_state_table(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ kfree(hwmgr->current_ps);
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
+ hwmgr->current_ps = NULL;
return 0;
}
+int hwmgr_hw_init(struct pp_instance *handle)
+{
+ struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+ if (handle == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+
+ if (hwmgr->pptable_func == NULL ||
+ hwmgr->pptable_func->pptable_init == NULL ||
+ hwmgr->hwmgr_func->backend_init == NULL)
+ return -EINVAL;
+
+ ret = hwmgr->pptable_func->pptable_init(hwmgr);
+ if (ret)
+ goto err;
+
+ ret = hwmgr->hwmgr_func->backend_init(hwmgr);
+ if (ret)
+ goto err1;
+
+ ret = hw_init_power_state_table(hwmgr);
+ if (ret)
+ goto err2;
+ return 0;
+err2:
+ if (hwmgr->hwmgr_func->backend_fini)
+ hwmgr->hwmgr_func->backend_fini(hwmgr);
+err1:
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
+err:
+ pr_err("amdgpu: powerplay initialization failed\n");
+ return ret;
+}
+
+int hwmgr_hw_fini(struct pp_instance *handle)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (handle == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+
+ if (hwmgr->hwmgr_func->backend_fini)
+ hwmgr->hwmgr_func->backend_fini(hwmgr);
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
+ return hw_fini_power_state_table(hwmgr);
+}
+
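hwmgr_hw_init() uses the usual kernel goto-unwind idiom: each successful step gains a matching teardown label, taken in reverse order on failure. A minimal generic sketch of the pattern (names hypothetical):

	int bringup(void)
	{
		int ret;

		ret = step_a();		/* here: pptable_init */
		if (ret)
			goto err;
		ret = step_b();		/* here: backend_init */
		if (ret)
			goto undo_a;
		ret = step_c();		/* here: hw_init_power_state_table */
		if (ret)
			goto undo_b;
		return 0;

	undo_b:
		undo_step_b();		/* backend_fini */
	undo_a:
		undo_step_a();		/* pptable_fini */
	err:
		return ret;
	}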
/**
* Returns once the part of the register indicated by the mask has
@@ -197,7 +253,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t cur_value;
if (hwmgr == NULL || hwmgr->device == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
+ pr_err("Invalid Hardware Manager!");
return -EINVAL;
}
@@ -227,7 +283,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t mask)
{
if (hwmgr == NULL || hwmgr->device == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
+ pr_err("Invalid Hardware Manager!");
return;
}
@@ -288,7 +344,7 @@ int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
kfree(table);
-
+ table = NULL;
return 0;
}
@@ -549,7 +605,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
if (NULL == table_clk_vlt) {
- printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
+ pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
return -ENOMEM;
} else {
table_clk_vlt->count = 4;
@@ -569,21 +625,6 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
return 0;
}
-int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
-
- if (NULL != hwmgr->backend) {
- kfree(hwmgr->backend);
- hwmgr->backend = NULL;
- }
-
- return 0;
-}
-
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
uint32_t level = 0;
@@ -625,7 +666,7 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
return;
}
}
- printk(KERN_ERR "DAL requested level can not"
+ pr_err("DAL requested level can not"
" found a available voltage in VDDC DPM Table \n");
}
@@ -683,14 +724,14 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
- if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
+ if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
else
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
- if (amdgpu_pp_feature_mask & PP_POWER_CONTAINMENT_MASK) {
+ if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -701,7 +742,6 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
}
- hwmgr->feature_mask = amdgpu_pp_feature_mask;
return 0;
}
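The caps helper now reads the per-instance mask instead of consulting the amdgpu_pp_feature_mask module parameter directly. The assumed flow of the mask after this change (sketch):

	/* amdgpu_pp_feature_mask (module param)
	 *   -> amd_pp_init.feature_mask  (filled by the amd_powerplay_create caller)
	 *   -> pp_instance.feature_mask  (amd_powerplay_create, above)
	 *   -> hwmgr->feature_mask       (hwmgr_early_init, above)
	 *   -> read here by hwmgr_set_user_specify_caps() */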
@@ -727,17 +767,10 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
- /* power tune caps Assume disabled */
+
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DBRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TDRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TCPRamping);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -746,9 +779,19 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
- if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
+
+ if (hwmgr->chip_id != CHIP_POLARIS10)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
+
+ if (hwmgr->chip_id != CHIP_POLARIS11) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 0894527d932f..953e0c9ad7cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -20,13 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "cgs_common.h"
-#include "pp_debug.h"
#include "ppevvmath.h"
#define MEM_ID_MASK 0xff000000
@@ -145,10 +145,10 @@ int atomctrl_initialize_mc_reg_table(
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
if (module_index >= vram_info->ucNumOfVRAMModule) {
- printk(KERN_ERR "[ powerplay ] Invalid VramInfo table.");
+ pr_err("Invalid VramInfo table.");
result = -1;
} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
- printk(KERN_ERR "[ powerplay ] Invalid VramInfo table.");
+ pr_err("Invalid VramInfo table.");
result = -1;
}
@@ -688,7 +688,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
break;
default:
- printk(KERN_ERR "DPM Level not supported\n");
+ pr_err("DPM Level not supported\n");
fPowerDPMx = Convert_ULONG_ToFraction(1);
fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
}
@@ -1396,3 +1396,25 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
return 0;
}
+
+int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
+ uint16_t *load_line)
+{
+ ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
+ (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
+
+ const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
+
+ PP_ASSERT_WITH_CODE((NULL != voltage_info),
+ "Could not find Voltage Table in BIOS.", return -EINVAL);
+
+ voltage_object = atomctrl_lookup_voltage_type_v3
+ (voltage_info, voltage_type, VOLTAGE_OBJ_SVID2);
+
+ *svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
+ *svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
+ *load_line = voltage_object->asSVID2Obj.usLoadLine_PSI;
+
+ return 0;
+}
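One caveat: the new helper trusts that an SVID2 voltage object exists. atomctrl_lookup_voltage_type_v3() can presumably return NULL, so a defensive variant would check the result (sketch, not the committed code):

	voltage_object = atomctrl_lookup_voltage_type_v3(voltage_info,
			voltage_type, VOLTAGE_OBJ_SVID2);
	if (voltage_object == NULL)	/* no SVID2 object for this type */
		return -EINVAL;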
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index fc898afce002..e9fe2e84006b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -311,5 +311,8 @@ extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_a
extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
+extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
+ uint16_t *load_line);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index c45bd2560468..84f01fd33aff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -20,13 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>
#include "process_pptables_v1_0.h"
#include "ppatomctrl.h"
#include "atombios.h"
-#include "pp_debug.h"
#include "hwmgr.h"
#include "cgs_common.h"
#include "pptable_v1_0.h"
@@ -535,7 +535,7 @@ static int get_pcie_table(
if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
else
- printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
+ pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
Disregarding the excess entries... \n");
pcie_table->count = pcie_count;
@@ -577,7 +577,7 @@ static int get_pcie_table(
if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
else
- printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
+ pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
Disregarding the excess entries... \n");
pcie_table->count = pcie_count;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index a4e9cf429e62..ed6c934927fb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -27,7 +28,6 @@
#include "processpptables.h"
#include <atom-types.h>
#include <atombios.h>
-#include "pp_debug.h"
#include "pptable.h"
#include "power_state.h"
#include "hwmgr.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index a1fc4fcac1e0..8cf71f3c6d0e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -147,22 +147,22 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
data->uvd_power_gated = bgate;
if (bgate) {
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
smu7_update_uvd_dpm(hwmgr, true);
smu7_powerdown_uvd(hwmgr);
} else {
smu7_powerup_uvd(hwmgr);
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
smu7_update_uvd_dpm(hwmgr, false);
}
@@ -173,12 +173,12 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (data->vce_power_gated == bgate)
- return 0;
-
data->vce_power_gated = bgate;
if (bgate) {
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
@@ -186,10 +186,13 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
smu7_powerdown_vce(hwmgr);
} else {
smu7_powerup_vce(hwmgr);
- smu7_update_vce_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ smu7_update_vce_dpm(hwmgr, false);
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index d52a28c343e3..c96ed9ed7eaf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -22,7 +22,7 @@
*/
#ifndef _SMU7_CLOCK_POWER_GATING_H_
-#define _SMU7_CLOCK__POWER_GATING_H_
+#define _SMU7_CLOCK_POWER_GATING_H_
#include "smu7_hwmgr.h"
#include "pp_asicblocks.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index a74f60a575ae..f75ee33ec5bb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -20,13 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <asm/div64.h>
#include "linux/delay.h"
#include "pp_acpi.h"
-#include "pp_debug.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
@@ -40,6 +40,8 @@
#include "hwmgr.h"
#include "smu7_hwmgr.h"
+#include "smu7_smumgr.h"
+#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
@@ -88,6 +90,8 @@ enum DPM_EVENT_SRC {
};
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
+static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask);
static struct smu7_power_state *cast_phw_smu7_power_state(
struct pp_hw_power_state *hw_ps)
@@ -994,7 +998,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
SWRST_COMMAND_1, RESETLC, 0x0);
if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
+ pr_err("Failed to enable Sclk DPM and Mclk DPM!");
return -EINVAL;
}
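
This and the following hunks are mechanical printk(KERN_LEVEL ...) to pr_err()/pr_warning()/pr_info() conversions; the message strings are carried over verbatim (including the pre-existing "failed to retrieving" grammar slip further down). The pr_* helpers are thin printk wrappers that also apply pr_fmt():

        /* from include/linux/printk.h */
        #define pr_err(fmt, ...) \
                printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)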
@@ -1079,7 +1083,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
switch (sources) {
default:
- printk(KERN_ERR "Unknown throttling event sources.");
+ pr_err("Unknown throttling event sources.");
/* fall through */
case 0:
protection = false;
@@ -1292,6 +1296,10 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable SMC CAC!", result = tmp_result);
+ tmp_result = smu7_disable_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable DIDT!", result = tmp_result);
+
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -1375,6 +1383,15 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->force_pcie_gen = PP_PCIEGenInvalid;
data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
+ if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) {
+ uint8_t tmp1, tmp2;
+ uint16_t tmp3 = 0;
+ atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
+ &tmp3);
+ tmp3 = (tmp3 >> 5) & 0x3;
+ data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
+ }
+
data->fast_watermark_threshold = 100;
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
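
The new Polaris12/kicker block reads the SVI2 voltage-regulator info from the VBIOS and derives vddc_phase_shed_control from bits 6:5 of the returned word, with the two bits swapped. The swap is easiest to see in isolation (variable names here are illustrative; why the encoding is bit-reversed is a VBIOS detail the patch does not spell out):

        uint16_t raw = 0;       /* third out-parameter of atomctrl_get_svi2_info() */
        uint8_t field, swapped;

        field   = (raw >> 5) & 0x3;                     /* extract bits 6:5 */
        swapped = ((field << 1) | (field >> 1)) & 0x3;  /* swap bit 0 and bit 1 */
        /* 0b00 -> 0b00, 0b01 -> 0b10, 0b10 -> 0b01, 0b11 -> 0b11 */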
@@ -1499,7 +1516,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
data->vddcgfx_leakage.count++;
}
} else {
- printk("Error retrieving EVV voltage value!\n");
+ pr_info("Error retrieving EVV voltage value!\n");
}
}
} else {
@@ -1527,7 +1544,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
if (vddc >= 2000 || vddc == 0)
return -EINVAL;
} else {
- printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
+ pr_warning("failed to retrieving EVV voltage!\n");
continue;
}
@@ -1567,7 +1584,7 @@ static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
+ pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
}
/**
@@ -2032,7 +2049,7 @@ static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
+ pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
}
@@ -2267,6 +2284,21 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
return 0;
}
+static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+ }
+ pp_smu7_thermal_fini(hwmgr);
+ if (NULL != hwmgr->backend) {
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
+ }
+
+ return 0;
+}
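
The new smu7_hwmgr_backend_fini() gives the smu7 code its own teardown instead of the generic phm_hwmgr_backend_fini() (see the .backend_fini hook change below), so the thermal tables built during backend init are destroyed as well. The NULL checks around kfree() are defensive but not required; kfree(NULL) is a documented no-op, so an equivalent sketch is:

        kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
        hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
        pp_smu7_thermal_fini(hwmgr);
        kfree(hwmgr->backend);
        hwmgr->backend = NULL;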
+
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
@@ -2277,6 +2309,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
return -ENOMEM;
hwmgr->backend = data;
+ pp_smu7_thermal_initialize(hwmgr);
smu7_patch_voltage_workaround(hwmgr);
smu7_init_dpm_defaults(hwmgr);
@@ -2285,7 +2318,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = smu7_get_evv_voltages(hwmgr);
if (result) {
- printk("Get EVV Voltage Failed. Abort Driver loading!\n");
+ pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
return -EINVAL;
}
@@ -2334,7 +2367,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
smu7_thermal_parameter_init(hwmgr);
} else {
/* Ignore return value in here, we are cleaning up a mess. */
- phm_hwmgr_backend_fini(hwmgr);
+ smu7_hwmgr_backend_fini(hwmgr);
}
return 0;
@@ -2466,36 +2499,156 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
}
return 0;
+}
+
+static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
+{
+ uint32_t percentage;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+ int32_t tmp_mclk;
+ int32_t tmp_sclk;
+ int32_t count;
+
+ if (golden_dpm_table->mclk_table.count < 1)
+ return -EINVAL;
+
+ percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+
+ if (golden_dpm_table->mclk_table.count == 1) {
+ percentage = 70;
+ tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+ *mclk_mask = golden_dpm_table->mclk_table.count - 1;
+ } else {
+ tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
+ *mclk_mask = golden_dpm_table->mclk_table.count - 2;
+ }
+
+ tmp_sclk = tmp_mclk * percentage / 100;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
+ count >= 0; count--) {
+ if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
+ tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
+ *sclk_mask = count;
+ break;
+ }
+ }
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ *sclk_mask = 0;
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
+ } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
+ if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
+ tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
+ *sclk_mask = count;
+ break;
+ }
+ }
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ *sclk_mask = 0;
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
+ *mclk_mask = 0;
+ else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ *mclk_mask = golden_dpm_table->mclk_table.count - 1;
+
+ *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
+ return 0;
}
+
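smu7_get_profiling_clk() picks fixed DPM levels for the new profiling modes. It first derives an sclk target from the golden (unmodified) tables: the ratio of the top sclk to the top mclk, applied to the second-highest mclk (or 70% of the only mclk if the table has a single entry), then scans the voltage-dependency table from the top for the highest sclk level not exceeding that target. Worked through with hypothetical table values (illustrative numbers only, in the units the tables store):

        /* top sclk = 130000, top mclk = 200000
         *         => percentage = 100 * 130000 / 200000 = 65
         * mclk table has >1 entry
         *         => tmp_mclk = second-highest level, e.g. 175000
         * tmp_sclk = 175000 * 65 / 100 = 113750
         * *sclk_mask = index of highest vdd_dep_on_sclk entry with clk <= 113750
         */

MIN_SCLK/MIN_MCLK force the respective mask to level 0, PEAK forces the top level, and the PCIe mask always points at the fastest PCIe level.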
static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
+ uint32_t sclk_mask = 0;
+ uint32_t mclk_mask = 0;
+ uint32_t pcie_mask = 0;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (level == hwmgr->dpm_level)
+ return ret;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter profile mode, save current level, disable gfx cg*/
+ if (level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ }
+ } else {
+ /* exit profile mode, restore level, enable gfx cg*/
+ if (!(level & profile_mode_mask)) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ level = hwmgr->saved_dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ }
+ }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = smu7_force_dpm_lowest(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = smu7_unforce_dpm_levels(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
+ smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
+ smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
+
break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- hwmgr->dpm_level = level;
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
- return ret;
+ return 0;
}
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
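
smu7_force_dpm_level() now treats the forced-level values as bit flags: membership in the profiling group is a bitwise AND against profile_mode_mask, GFX clock gating is ungated on entry to a profiling level (with the previous level saved) and re-gated on exit, and PROFILE_EXIT restores the saved level. hwmgr->dpm_level is also only updated when a branch succeeds, rather than unconditionally at the end. A sketch of the transition logic, assuming the AMD_DPM_FORCED_LEVEL_* values are single-bit flags (which the mask arithmetic requires):

        bool in_profile = hwmgr->dpm_level & profile_mode_mask;
        bool to_profile = level & profile_mode_mask;

        if (!in_profile && to_profile) {
                hwmgr->saved_dpm_level = hwmgr->dpm_level; /* remember where we came from */
                /* ungate GFX clock gating for stable measurements */
        } else if (in_profile && !to_profile) {
                if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                        level = hwmgr->saved_dpm_level;    /* go back */
                /* re-enable GFX clock gating */
        }

The fan is additionally pinned at 100% while PROFILE_PEAK is active and reset to automatic control on leaving it.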
@@ -2898,11 +3051,11 @@ static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
- printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+ pr_err("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].vddci !=
data->vbios_boot_state.vddci_bootup_value)
- printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+ pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3046,11 +3199,11 @@ static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
- printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+ pr_err("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].v !=
data->vbios_boot_state.vddci_bootup_value)
- printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+ pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3590,9 +3743,9 @@ static int smu7_notify_link_speed_change_after_state_change(
if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
if (PP_PCIEGen2 == target_link_speed)
- printk("PSPP request to switch to Gen2 from Gen3 Failed!");
+ pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
else
- printk("PSPP request to switch to Gen1 from Gen2 Failed!");
+ pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
}
}
@@ -4029,7 +4182,9 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ AMD_DPM_FORCED_LEVEL_LOW |
+ AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
switch (type) {
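
The guard in smu7_force_clock_level() is inverted to match: instead of admitting only AMD_DPM_FORCED_LEVEL_MANUAL, it now rejects AUTO/LOW/HIGH and lets everything else through, which is what allows the PROFILE_* branch of smu7_force_dpm_level() above to reuse this function for pinning the sclk/mclk/PCIe levels.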
@@ -4252,16 +4407,14 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
return -EINVAL;
dep_sclk_table = table_info->vdd_dep_on_sclk;
- for (i = 0; i < dep_sclk_table->count; i++) {
+ for (i = 0; i < dep_sclk_table->count; i++)
clocks->clock[i] = dep_sclk_table->entries[i].clk;
- clocks->count++;
- }
+ clocks->count = dep_sclk_table->count;
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
- for (i = 0; i < sclk_table->count; i++) {
+ for (i = 0; i < sclk_table->count; i++)
clocks->clock[i] = sclk_table->entries[i].clk;
- clocks->count++;
- }
+ clocks->count = sclk_table->count;
}
return 0;
@@ -4295,14 +4448,13 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
clocks->clock[i] = dep_mclk_table->entries[i].clk;
clocks->latency[i] = smu7_get_mem_latency(hwmgr,
dep_mclk_table->entries[i].clk);
- clocks->count++;
}
+ clocks->count = dep_mclk_table->count;
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
- for (i = 0; i < mclk_table->count; i++) {
+ for (i = 0; i < mclk_table->count; i++)
clocks->clock[i] = mclk_table->entries[i].clk;
- clocks->count++;
- }
+ clocks->count = mclk_table->count;
}
return 0;
}
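
The smu7_get_sclks()/smu7_get_mclks() loops previously did clocks->count++ on a field this function never zeroes, silently trusting the caller to have cleared the whole struct; assigning the table count once is equivalent when the struct is zeroed and correct even when it is not.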
@@ -4324,9 +4476,35 @@ static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
return 0;
}
+static int smu7_request_firmware(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct cgs_firmware_info info = {0};
+
+ ret = cgs_get_firmware_info(hwmgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
+ &info);
+ if (ret || !info.kptr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int smu7_release_firmware(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+
+ ret = cgs_rel_firmware(hwmgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU));
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.backend_init = &smu7_hwmgr_backend_init,
- .backend_fini = &phm_hwmgr_backend_fini,
+ .backend_fini = &smu7_hwmgr_backend_fini,
.asic_setup = &smu7_setup_asic_task,
.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
@@ -4371,6 +4549,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
.dynamic_state_management_disable = smu7_disable_dpm_tasks,
+ .request_firmware = smu7_request_firmware,
+ .release_firmware = smu7_release_firmware,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
@@ -4390,7 +4570,7 @@ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
return i;
}
-int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
+int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
int ret = 0;
@@ -4400,7 +4580,6 @@ int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
else if (hwmgr->pp_table_version == PP_TABLE_V1)
hwmgr->pptable_func = &pptable_v1_0_funcs;
- pp_smu7_thermal_initialize(hwmgr);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index 27e7f76ad8a6..f221e17b67e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -268,7 +268,7 @@ struct smu7_hwmgr {
uint32_t fast_watermark_threshold;
/* ---- Phase Shedding ---- */
- bool vddc_phase_shed_control;
+ uint8_t vddc_phase_shed_control;
/* ---- DI/DT ---- */
struct smu7_display_timing display_timing;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 6cd1287a7a8f..1dc31aa72781 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -20,17 +20,19 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include "hwmgr.h"
#include "smumgr.h"
#include "smu7_hwmgr.h"
#include "smu7_powertune.h"
-#include "pp_debug.h"
#include "smu7_common.h"
#define VOLTAGE_SCALE 4
static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+static uint32_t Polaris11_DIDTBlock_Info = SQ_PCC_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
static const struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
@@ -261,9 +263,9 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
@@ -271,12 +273,12 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
@@ -284,8 +286,8 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
@@ -373,55 +375,305 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
{ 0xFFFFFFFF }
};
+static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
-static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x004c, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00d0, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0069, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x0048, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x005f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x007a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x001f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x002d, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x0088, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+{
uint32_t en = enable ? 1 : 0;
+ uint32_t block_en = 0;
int32_t result = 0;
+ uint32_t didt_block;
uint32_t data;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
- DIDTBlock_Info &= ~SQ_Enable_MASK;
- DIDTBlock_Info |= en << SQ_Enable_SHIFT;
- }
+ if (hwmgr->chip_id == CHIP_POLARIS11)
+ didt_block = Polaris11_DIDTBlock_Info;
+ else
+ didt_block = DIDTBlock_Info;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
- DIDTBlock_Info &= ~DB_Enable_MASK;
- DIDTBlock_Info |= en << DB_Enable_SHIFT;
- }
+ block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ? en : 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
- DIDTBlock_Info &= ~TD_Enable_MASK;
- DIDTBlock_Info |= en << TD_Enable_SHIFT;
- }
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+ data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ didt_block &= ~SQ_Enable_MASK;
+ didt_block |= block_en << SQ_Enable_SHIFT;
+
+ block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ? en : 0;
+
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+ data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((block_en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ didt_block &= ~DB_Enable_MASK;
+ didt_block |= block_en << DB_Enable_SHIFT;
+
+ block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ? en : 0;
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+ data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ didt_block &= ~TD_Enable_MASK;
+ didt_block |= block_en << TD_Enable_SHIFT;
+
+ block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping) ? en : 0;
+
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+ data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((block_en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ didt_block &= ~TCP_Enable_MASK;
+ didt_block |= block_en << TCP_Enable_SHIFT;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
- DIDTBlock_Info &= ~TCP_Enable_MASK;
- DIDTBlock_Info |= en << TCP_Enable_SHIFT;
- }
if (enable)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block);
return result;
}
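
The rewritten smu7_enable_didt() no longer skips a block when its ramping cap is unset: every block's DIDT_CTRL_EN bit is now written, with block_en folding the cap into the value, so a previously-enabled block gets explicitly disabled rather than left stale. It also stops mutating the file-static DIDTBlock_Info and works on a local copy, picking the Polaris11-specific info word (SQ_PCC_MASK instead of SQ_IR_MASK) where needed. The repeated read-modify-write could be factored into a helper along these lines (hypothetical sketch, not part of the patch):

        static void didt_set_enable(void *device, uint32_t reg,
                                    uint32_t mask, uint32_t shift, uint32_t en)
        {
                uint32_t data = cgs_read_ind_register(device, CGS_IND_REG__DIDT, reg);

                data &= ~mask;
                data |= (en << shift) & mask;
                cgs_write_ind_register(device, CGS_IND_REG__DIDT, reg, data);
        }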
@@ -498,7 +750,6 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
-
if (result == 0)
num_se = sys_info.value;
@@ -507,7 +758,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
- /* TO DO Pre DIDT disable clock gating */
+ cgs_enter_safe_mode(hwmgr->device, true);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
for (count = 0; count < num_se; count++) {
@@ -521,10 +772,18 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- } else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ if (hwmgr->smumgr->is_kicker)
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker);
+ else
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ } else if (hwmgr->chip_id == CHIP_POLARIS12) {
result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
}
}
@@ -533,7 +792,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
result = smu7_enable_didt(hwmgr, true);
PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
- /* TO DO Post DIDT enable clock gating */
+ if (hwmgr->chip_id == CHIP_POLARIS11) {
+ result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_EnableDpmDidt));
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to enable DPM DIDT.", return result);
+ }
+ cgs_enter_safe_mode(hwmgr->device, false);
}
return 0;
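
The two "TO DO ... clock gating" placeholders are replaced with real bracketing: cgs_enter_safe_mode(..., true/false) idles the GFX pipeline while the per-shader-engine GC_CAC and DIDT registers are programmed through mmGRBM_GFX_INDEX. Polaris11 gains two extras here: kicker SKUs get their own DIDTConfig_Polaris11_Kicker register table, and the SMC is told to couple DIDT with DPM via PPSMC_MSG_EnableDpmDidt (mirrored by PPSMC_MSG_DisableDpmDidt in the disable path below). Polaris12 likewise gets its own DIDTConfig_Polaris12 table instead of sharing the Polaris11 one.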
@@ -547,11 +812,20 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
- /* TO DO Pre DIDT disable clock gating */
+
+ cgs_enter_safe_mode(hwmgr->device, true);
result = smu7_enable_didt(hwmgr, false);
- PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
- /* TO DO Post DIDT enable clock gating */
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Post DIDT enable clock gating failed.",
+ return result);
+ if (hwmgr->chip_id == CHIP_POLARIS11) {
+ result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableDpmDidt));
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to disable DPM DIDT.", return result);
+ }
+ cgs_enter_safe_mode(hwmgr->device, false);
}
return 0;
@@ -651,7 +925,7 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
if (smu7_set_power_limit(hwmgr, default_limit))
- printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
+ pr_err("Failed to set Default Power Limit in SMC!");
}
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 29d0319b22e6..436ca5ce8248 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -506,18 +506,18 @@ static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
static const struct phm_master_table_item
phm_thermal_start_thermal_controller_master_list[] = {
- {NULL, tf_smu7_thermal_initialize},
- {NULL, tf_smu7_thermal_set_temperature_range},
- {NULL, tf_smu7_thermal_enable_alert},
- {NULL, smum_thermal_avfs_enable},
+ { .tableFunction = tf_smu7_thermal_initialize },
+ { .tableFunction = tf_smu7_thermal_set_temperature_range },
+ { .tableFunction = tf_smu7_thermal_enable_alert },
+ { .tableFunction = smum_thermal_avfs_enable },
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- {NULL, smum_thermal_setup_fan_table},
- {NULL, tf_smu7_thermal_start_smc_fan_control},
- {NULL, NULL}
+ { .tableFunction = smum_thermal_setup_fan_table },
+ { .tableFunction = tf_smu7_thermal_start_smc_fan_control },
+ { }
};
static const struct phm_master_table_header
@@ -529,10 +529,10 @@ phm_thermal_start_thermal_controller_master = {
static const struct phm_master_table_item
phm_thermal_set_temperature_range_master_list[] = {
- {NULL, tf_smu7_thermal_disable_alert},
- {NULL, tf_smu7_thermal_set_temperature_range},
- {NULL, tf_smu7_thermal_enable_alert},
- {NULL, NULL}
+ { .tableFunction = tf_smu7_thermal_disable_alert },
+ { .tableFunction = tf_smu7_thermal_set_temperature_range },
+ { .tableFunction = tf_smu7_thermal_enable_alert },
+ { }
};
static const struct phm_master_table_header
@@ -575,3 +575,9 @@ int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
return result;
}
+void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr)
+{
+ phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
+ phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller));
+ return;
+}
\ No newline at end of file
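
The thermal-table hunks above move from positional {NULL, fn} initializers to designated initializers with an empty { } sentinel, so entries no longer depend on field order and the terminator is simply an all-zero element. A minimal, compilable C sketch of that pattern follows; it is not kernel code, the first field is a stand-in, and only .tableFunction matches the real struct:

/*
 * Sketch: designated initializers plus an empty { } sentinel.
 * Unset fields are zeroed regardless of field order, and the
 * table walker stops at the first entry with a NULL function.
 */
#include <stdio.h>

struct table_item {
	const char *name;             /* stand-in field, defaults to NULL */
	int (*tableFunction)(void);   /* NULL terminates the walk */
};

static int step_one(void) { puts("step one"); return 0; }
static int step_two(void) { puts("step two"); return 0; }

static const struct table_item master_list[] = {
	{ .tableFunction = step_one },
	{ .tableFunction = step_two },
	{ }                           /* sentinel */
};

int main(void)
{
	const struct table_item *i;

	for (i = master_list; i->tableFunction; i++)
		i->tableFunction();
	return 0;
}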
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
index 6face973be43..2ed774db42c7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -47,6 +47,7 @@ extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 3a883e6c601a..6dd5f0e9ef87 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,7 +29,10 @@
#include "amd_shared.h"
#include "cgs_common.h"
-extern int amdgpu_dpm;
+extern const struct amd_ip_funcs pp_ip_funcs;
+extern const struct amd_powerplay_funcs pp_dpm_funcs;
+
+#define PP_DPM_DISABLED 0xCCCC
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
@@ -135,17 +138,12 @@ enum amd_pp_event {
AMD_PP_EVENT_MAX
};
-enum amd_dpm_forced_level {
- AMD_DPM_FORCED_LEVEL_AUTO = 0,
- AMD_DPM_FORCED_LEVEL_LOW = 1,
- AMD_DPM_FORCED_LEVEL_HIGH = 2,
- AMD_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
struct amd_pp_init {
struct cgs_device *device;
uint32_t chip_family;
uint32_t chip_id;
+ bool pm_en;
+ uint32_t feature_mask;
};
enum amd_pp_display_config_type{
@@ -371,10 +369,10 @@ struct amd_powerplay {
const struct amd_powerplay_funcs *pp_funcs;
};
-int amd_powerplay_init(struct amd_pp_init *pp_init,
- struct amd_powerplay *amd_pp);
+int amd_powerplay_create(struct amd_pp_init *pp_init,
+ void **handle);
-int amd_powerplay_fini(void *handle);
+int amd_powerplay_destroy(void *handle);
int amd_powerplay_reset(void *handle);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
index d63ef83b2628..7bd8a7e57080 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
@@ -119,7 +119,6 @@ struct pp_eventmgr {
void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr);
};
-int eventmgr_init(struct pp_instance *handle);
-int eventmgr_fini(struct pp_eventmgr *eventmgr);
+int eventmgr_early_init(struct pp_instance *handle);
#endif /* _EVENTMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 6cdb7cbf515e..7275a29293eb 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -38,8 +38,6 @@ struct pp_hwmgr;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;
-extern unsigned amdgpu_pp_feature_mask;
-
#define VOLTAGE_SCALE 4
uint8_t convert_to_vid(uint16_t vddc);
@@ -358,6 +356,8 @@ struct pp_hwmgr_func {
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value);
+ int (*request_firmware)(struct pp_hwmgr *hwmgr);
+ int (*release_firmware)(struct pp_hwmgr *hwmgr);
};
struct pp_table_func {
@@ -612,6 +612,7 @@ struct pp_hwmgr {
uint32_t num_vce_state_tables;
enum amd_dpm_forced_level dpm_level;
+ enum amd_dpm_forced_level saved_dpm_level;
bool block_hw_access;
struct phm_gfx_arbiter gfx_arbiter;
struct phm_acp_arbiter acp_arbiter;
@@ -651,19 +652,12 @@ struct pp_hwmgr {
uint32_t feature_mask;
};
-
-extern int hwmgr_init(struct amd_pp_init *pp_init,
- struct pp_instance *handle);
-
-extern int hwmgr_fini(struct pp_hwmgr *hwmgr);
-
-extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr);
-
+extern int hwmgr_early_init(struct pp_instance *handle);
+extern int hwmgr_hw_init(struct pp_instance *handle);
+extern int hwmgr_hw_fini(struct pp_instance *handle);
extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t value, uint32_t mask);
-
-
extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
@@ -692,11 +686,10 @@ extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level
extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *sclk);
extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
-extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t id, uint16_t *voltage);
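
The header now splits the old hwmgr_init()/hwmgr_fini() pair into hwmgr_early_init() plus hwmgr_hw_init()/hwmgr_hw_fini(). A small compilable sketch of that lifecycle, under the assumption suggested by the naming that early init only wires up software state while hw_init/hw_fini bracket the actual hardware bring-up; every identifier below is illustrative, not the kernel's:

#include <stdio.h>

struct instance { int sw_ready; int hw_ready; };

static int early_init(struct instance *h)
{
	h->sw_ready = 1;   /* tables and function pointers, no HW access */
	return 0;
}

static int hw_init(struct instance *h)
{
	if (!h->sw_ready)
		return -1;
	h->hw_ready = 1;   /* would program the hardware */
	return 0;
}

static void hw_fini(struct instance *h)
{
	h->hw_ready = 0;   /* tear down only what hw_init set up */
}

int main(void)
{
	struct instance h = { 0, 0 };

	if (early_init(&h) || hw_init(&h))
		return 1;
	hw_fini(&h);
	puts("lifecycle ok");
	return 0;
}

Splitting the phases this way lets the driver fail early-init (no hardware touched) independently of hardware init, which matches the new pm_en/feature_mask plumbing elsewhere in the series.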
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index bfdbec10cdd5..072880130cfb 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -24,6 +24,12 @@
#ifndef PP_DEBUG_H
#define PP_DEBUG_H
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "amdgpu: [powerplay] " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -31,7 +37,7 @@
#define PP_ASSERT_WITH_CODE(cond, msg, code) \
do { \
if (!(cond)) { \
- printk("%s\n", msg); \
+ pr_warning("%s\n", msg); \
code; \
} \
} while (0)
@@ -39,7 +45,7 @@
#define PP_DBG_LOG(fmt, ...) \
do { \
- if(0)printk(KERN_INFO "[ pp_dbg ] " fmt, ##__VA_ARGS__); \
+ pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
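
This pp_debug.h hunk defines pr_fmt() before any pr_* call site, which is why the rest of the series can drop the hand-written "[ powerplay ]" prefixes from individual messages. A userspace mock, not the kernel's printk machinery, showing how the override rewrites every message in a translation unit; the pr_err definition here is a hypothetical stand-in:

/*
 * Mock of the kernel pattern: pr_* macros expand their format
 * string through pr_fmt() at each call site, so defining pr_fmt
 * before the macros are used prefixes every message in the file.
 */
#include <stdio.h>

#define pr_fmt(fmt) "amdgpu: [powerplay] " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: amdgpu: [powerplay] SMC address must be 4 byte aligned */
	pr_err("SMC address must be 4 byte aligned\n");
	return 0;
}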
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
index 4d8ed1f33de4..ab8494fb5c6b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
@@ -31,6 +31,11 @@
struct pp_instance {
uint32_t pp_valid;
+ uint32_t chip_family;
+ uint32_t chip_id;
+ bool pm_en;
+ uint32_t feature_mask;
+ void *device;
struct pp_smumgr *smu_mgr;
struct pp_hwmgr *hwmgr;
struct pp_eventmgr *eventmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index bce00096d80d..fbc504c70b8b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -394,6 +394,9 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+#define PPSMC_MSG_EnableDpmDidt ((uint16_t) 0x309)
+#define PPSMC_MSG_DisableDpmDidt ((uint16_t) 0x30A)
+
#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 2139072065cc..7c318a95e0c2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -33,6 +33,12 @@ struct pp_hwmgr;
#define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
+extern const struct pp_smumgr_func cz_smu_funcs;
+extern const struct pp_smumgr_func iceland_smu_funcs;
+extern const struct pp_smumgr_func tonga_smu_funcs;
+extern const struct pp_smumgr_func fiji_smu_funcs;
+extern const struct pp_smumgr_func polaris10_smu_funcs;
+
enum AVFS_BTC_STATUS {
AVFS_BTC_BOOT = 0,
AVFS_BTC_BOOT_STARTEDSMU,
@@ -131,13 +137,10 @@ struct pp_smumgr {
uint32_t usec_timeout;
bool reload_fw;
const struct pp_smumgr_func *smumgr_funcs;
+ bool is_kicker;
};
-
-extern int smum_init(struct amd_pp_init *pp_init,
- struct pp_instance *handle);
-
-extern int smum_fini(struct pp_smumgr *smumgr);
+extern int smum_early_init(struct pp_instance *handle);
extern int smum_get_argument(struct pp_smumgr *smumgr);
@@ -172,13 +175,6 @@ extern int smu_allocate_memory(void *device, uint32_t size,
void **kptr, void *handle);
extern int smu_free_memory(void *device, void *handle);
-
-extern int cz_smum_init(struct pp_smumgr *smumgr);
-extern int iceland_smum_init(struct pp_smumgr *smumgr);
-extern int tonga_smum_init(struct pp_smumgr *smumgr);
-extern int fiji_smum_init(struct pp_smumgr *smumgr);
-extern int polaris10_smum_init(struct pp_smumgr *smumgr);
-
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 5a44485526d2..1f6744a443d4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -70,7 +70,7 @@ static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
if (result != 0) {
- printk(KERN_ERR "[ powerplay ] cz_send_msg_to_smc_async failed\n");
+ pr_err("cz_send_msg_to_smc_async failed\n");
return result;
}
@@ -100,12 +100,12 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
return -EINVAL;
if (0 != (3 & smc_address)) {
- printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
+ pr_err("SMC address must be 4 byte aligned\n");
return -EINVAL;
}
if (limit <= (smc_address + 3)) {
- printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
+ pr_err("SMC address beyond the SMC RAM area\n");
return -EINVAL;
}
@@ -141,42 +141,6 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
return cz_send_msg_to_smc(smumgr, msg);
}
-static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
- uint32_t smc_address;
-
- if (!smumgr->reload_fw) {
- printk(KERN_INFO "[ powerplay ] skip reloading...\n");
- return 0;
- }
-
- smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
- cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);
-
- cz_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_DriverDramAddrHi,
- cz_smu->toc_buffer.mc_addr_high);
-
- cz_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_DriverDramAddrLo,
- cz_smu->toc_buffer.mc_addr_low);
-
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
-
- cz_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_aram);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_power_profiling_index);
-
- return cz_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_initialize_index);
-}
-
static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
uint32_t firmware)
{
@@ -198,7 +162,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
}
if (i >= smumgr->usec_timeout) {
- printk(KERN_ERR "[ powerplay ] SMU check loaded firmware failed.\n");
+ pr_err("SMU check loaded firmware failed.\n");
return -EINVAL;
}
@@ -250,34 +214,6 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_start_smu(struct pp_smumgr *smumgr)
-{
- int ret = 0;
- uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_JT1_MASK |
- UCODE_ID_CP_MEC_JT2_MASK;
-
- if (smumgr->chip_id == CHIP_STONEY)
- fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
-
- ret = cz_request_smu_load_fw(smumgr);
- if (ret)
- printk(KERN_ERR "[ powerplay] SMU firmware load failed\n");
-
- cz_check_fw_load_finish(smumgr, fw_to_check);
-
- ret = cz_load_mec_firmware(smumgr);
- if (ret)
- printk(KERN_ERR "[ powerplay ] Mec Firmware load failed\n");
-
- return ret;
-}
-
static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
enum cz_scratch_entry firmware_enum)
{
@@ -406,7 +342,7 @@ static int cz_smu_populate_single_scratch_task(
break;
if (i >= cz_smu->scratch_buffer_length) {
- printk(KERN_ERR "[ powerplay ] Invalid Firmware Type\n");
+ pr_err("Invalid Firmware Type\n");
return -EINVAL;
}
@@ -443,7 +379,7 @@ static int cz_smu_populate_single_ucode_load_task(
break;
if (i >= cz_smu->driver_buffer_length) {
- printk(KERN_ERR "[ powerplay ] Invalid Firmware Type\n");
+ pr_err("Invalid Firmware Type\n");
return -EINVAL;
}
@@ -729,11 +665,87 @@ static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
return 0;
}
+static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
+{
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
+ uint32_t smc_address;
+
+ if (!smumgr->reload_fw) {
+ pr_info("skip reloading...\n");
+ return 0;
+ }
+
+ cz_smu_populate_firmware_entries(smumgr);
+
+ cz_smu_construct_toc(smumgr);
+
+ smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+ cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);
+
+ cz_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_DriverDramAddrHi,
+ cz_smu->toc_buffer.mc_addr_high);
+
+ cz_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_DriverDramAddrLo,
+ cz_smu->toc_buffer.mc_addr_low);
+
+ cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
+
+ cz_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_ExecuteJob,
+ cz_smu->toc_entry_aram);
+ cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_smu->toc_entry_power_profiling_index);
+
+ return cz_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_ExecuteJob,
+ cz_smu->toc_entry_initialize_index);
+}
+
+static int cz_start_smu(struct pp_smumgr *smumgr)
+{
+ int ret = 0;
+ uint32_t fw_to_check = 0;
+
+ fw_to_check = UCODE_ID_RLC_G_MASK |
+ UCODE_ID_SDMA0_MASK |
+ UCODE_ID_SDMA1_MASK |
+ UCODE_ID_CP_CE_MASK |
+ UCODE_ID_CP_ME_MASK |
+ UCODE_ID_CP_PFP_MASK |
+ UCODE_ID_CP_MEC_JT1_MASK |
+ UCODE_ID_CP_MEC_JT2_MASK;
+
+ if (smumgr->chip_id == CHIP_STONEY)
+ fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
+ ret = cz_request_smu_load_fw(smumgr);
+ if (ret)
+ pr_err("SMU firmware load failed\n");
+
+ cz_check_fw_load_finish(smumgr, fw_to_check);
+
+ ret = cz_load_mec_firmware(smumgr);
+ if (ret)
+ pr_err("Mec Firmware load failed\n");
+
+ return ret;
+}
+
static int cz_smu_init(struct pp_smumgr *smumgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
uint64_t mc_addr = 0;
int ret = 0;
+ struct cz_smumgr *cz_smu;
+
+ cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
+ if (cz_smu == NULL)
+ return -ENOMEM;
+
+ smumgr->backend = cz_smu;
cz_smu->toc_buffer.data_size = 4096;
cz_smu->smu_buffer.data_size =
@@ -769,12 +781,11 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- cz_smu_populate_firmware_entries(smumgr);
if (0 != cz_smu_populate_single_scratch_entry(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
+ pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
@@ -782,14 +793,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
+ pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
if (0 != cz_smu_populate_single_scratch_entry(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
+ pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
@@ -797,7 +808,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
sizeof(struct SMU8_MultimediaPowerLogData),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
+ pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
@@ -805,10 +816,9 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
sizeof(struct SMU8_Fusion_ClkTable),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n");
+ pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
- cz_smu_construct_toc(smumgr);
return 0;
}
@@ -827,13 +837,12 @@ static int cz_smu_fini(struct pp_smumgr *smumgr)
cgs_free_gpu_mem(smumgr->device,
cz_smu->smu_buffer.handle);
kfree(cz_smu);
- kfree(smumgr);
}
return 0;
}
-static const struct pp_smumgr_func cz_smu_funcs = {
+const struct pp_smumgr_func cz_smu_funcs = {
.smu_init = cz_smu_init,
.smu_fini = cz_smu_fini,
.start_smu = cz_start_smu,
@@ -847,15 +856,3 @@ static const struct pp_smumgr_func cz_smu_funcs = {
.upload_pptable_settings = cz_upload_pptable_settings,
};
-int cz_smum_init(struct pp_smumgr *smumgr)
-{
- struct cz_smumgr *cz_smu;
-
- cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
- if (cz_smu == NULL)
- return -ENOMEM;
-
- smumgr->backend = cz_smu;
- smumgr->smumgr_funcs = &cz_smu_funcs;
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
index 883818039248..7c3a290c8957 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
@@ -95,8 +95,4 @@ struct cz_smumgr {
struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
};
-struct pp_smumgr;
-
-extern int cz_smum_init(struct pp_smumgr *smumgr);
-
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 6aeb1d20cc3b..0f7a77b7312e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -21,13 +21,13 @@
*
*/
+#include "pp_debug.h"
#include "fiji_smc.h"
#include "smu7_dyn_defaults.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
-#include "pp_debug.h"
#include "cgs_common.h"
#include "atombios.h"
#include "fiji_smumgr.h"
@@ -2131,7 +2131,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
+ pr_warning("can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2156,7 +2156,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
return SMU73_MAX_LEVELS_MVDD;
}
- printk(KERN_WARNING "can't get the mac of %x\n", value);
+ pr_warning("can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 26eff56b4a99..54b347366b5d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -21,6 +21,7 @@
*
*/
+#include "pp_debug.h"
#include "smumgr.h"
#include "smu73.h"
#include "smu_ucode_xfer_vi.h"
@@ -36,7 +37,6 @@
#include "gca/gfx_8_0_d.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-#include "pp_debug.h"
#include "fiji_pwrvirus.h"
#include "fiji_smc.h"
@@ -179,7 +179,7 @@ static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
result = 0;
break;
default:
- printk(KERN_ERR "Table Exit with Invalid Command!");
+ pr_err("Table Exit with Invalid Command!");
priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
result = -1;
break;
@@ -202,13 +202,13 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
result = 0;
} else {
- printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt"
+ pr_err("[AVFS][fiji_start_avfs_btc] Attempt"
" to Enable AVFS Failed!");
smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
result = -1;
}
} else {
- printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] "
+ pr_err("[AVFS][fiji_start_avfs_btc] "
"PerformBTC SMU msg failed");
result = -1;
}
@@ -384,7 +384,7 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
break;
default:
- printk(KERN_ERR "[AVFS] Something is broken. See log!");
+ pr_err("[AVFS] Something is broken. See log!");
break;
}
return 0;
@@ -464,13 +464,20 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
*/
static int fiji_smu_init(struct pp_smumgr *smumgr)
{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
int i;
+ struct fiji_smumgr *fiji_priv = NULL;
+
+ fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL);
+
+ if (fiji_priv == NULL)
+ return -ENOMEM;
+
+ smumgr->backend = fiji_priv;
if (smu7_init(smumgr))
return -EINVAL;
- priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
+ fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
if (fiji_is_hw_avfs_present(smumgr))
/* AVFS Parameter
* 0 - BTC DC disabled, BTC AC disabled
@@ -479,18 +486,18 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
* 3 - BTC DC enabled, BTC AC enabled
* Default is 0 - BTC DC disabled, BTC AC disabled
*/
- priv->avfs.AvfsBtcParam = 0;
+ fiji_priv->avfs.AvfsBtcParam = 0;
else
- priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
+ fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
- priv->activity_target[i] = 30;
+ fiji_priv->activity_target[i] = 30;
return 0;
}
-static const struct pp_smumgr_func fiji_smu_funcs = {
+const struct pp_smumgr_func fiji_smu_funcs = {
.smu_init = &fiji_smu_init,
.smu_fini = &smu7_smu_fini,
.start_smu = &fiji_start_smu,
@@ -513,18 +520,3 @@ static const struct pp_smumgr_func fiji_smu_funcs = {
.initialize_mc_reg_table = fiji_initialize_mc_reg_table,
.is_dpm_running = fiji_is_dpm_running,
};
-
-int fiji_smum_init(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *fiji_smu = NULL;
-
- fiji_smu = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL);
-
- if (fiji_smu == NULL)
- return -ENOMEM;
-
- smumgr->backend = fiji_smu;
- smumgr->smumgr_funcs = &fiji_smu_funcs;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index a24971a33bfd..ad82161df831 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -21,13 +21,13 @@
*
*/
+#include "pp_debug.h"
#include "iceland_smc.h"
#include "smu7_dyn_defaults.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
-#include "pp_debug.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"
@@ -1545,7 +1545,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
if (0 != result) {
smu_data->smc_state_table.GraphicsBootLevel = 0;
- printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
+ pr_err("VBIOS did not find boot engine clock value \
in dependency table. Using Graphics DPM level 0!");
result = 0;
}
@@ -1556,7 +1556,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
if (0 != result) {
smu_data->smc_state_table.MemoryBootLevel = 0;
- printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
+ pr_err("VBIOS did not find boot engine clock value \
in dependency table. Using Memory DPM level 0!");
result = 0;
}
@@ -2146,7 +2146,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
+ pr_warning("can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2169,7 +2169,7 @@ uint32_t iceland_get_mac_definition(uint32_t value)
return SMU71_MAX_LEVELS_MVDD;
}
- printk(KERN_WARNING "can't get the mac of %x\n", value);
+ pr_warning("can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index eeafefc4acba..0bf2def3b659 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -22,6 +22,7 @@
* Author: Huang Rui <ray.huang@amd.com>
*
*/
+#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -29,7 +30,6 @@
#include "smumgr.h"
#include "iceland_smumgr.h"
-#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "ppsmc.h"
#include "smu/smu_7_1_1_d.h"
@@ -176,7 +176,7 @@ static int iceland_start_smu(struct pp_smumgr *smumgr)
return result;
if (!smu7_is_smc_ram_running(smumgr)) {
- printk("smu not running, upload firmware again \n");
+ pr_info("smu not running, upload firmware again \n");
result = iceland_smu_upload_firmware_image(smumgr);
if (result)
return result;
@@ -201,17 +201,25 @@ static int iceland_start_smu(struct pp_smumgr *smumgr)
static int iceland_smu_init(struct pp_smumgr *smumgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct iceland_smumgr *iceland_priv = NULL;
+
+ iceland_priv = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
+
+ if (iceland_priv == NULL)
+ return -ENOMEM;
+
+ smumgr->backend = iceland_priv;
+
if (smu7_init(smumgr))
return -EINVAL;
for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
- smu_data->activity_target[i] = 30;
+ iceland_priv->activity_target[i] = 30;
return 0;
}
-static const struct pp_smumgr_func iceland_smu_funcs = {
+const struct pp_smumgr_func iceland_smu_funcs = {
.smu_init = &iceland_smu_init,
.smu_fini = &smu7_smu_fini,
.start_smu = &iceland_start_smu,
@@ -234,17 +242,3 @@ static const struct pp_smumgr_func iceland_smu_funcs = {
.is_dpm_running = iceland_is_dpm_running,
};
-int iceland_smum_init(struct pp_smumgr *smumgr)
-{
- struct iceland_smumgr *iceland_smu = NULL;
-
- iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
-
- if (iceland_smu == NULL)
- return -ENOMEM;
-
- smumgr->backend = iceland_smu;
- smumgr->smumgr_funcs = &iceland_smu_funcs;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 5190e821200c..80e2329a1b9e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -21,13 +21,13 @@
*
*/
+#include "pp_debug.h"
#include "polaris10_smc.h"
#include "smu7_dyn_defaults.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
-#include "pp_debug.h"
#include "cgs_common.h"
#include "atombios.h"
#include "polaris10_smumgr.h"
@@ -494,6 +494,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
state->CcPwrDynRm = 0;
state->CcPwrDynRm1 = 0;
@@ -502,7 +503,10 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+ if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker)
+ state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
+ else
+ state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
@@ -2180,7 +2184,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
+ pr_warning("can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2207,7 +2211,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
}
- printk(KERN_WARNING "can't get the mac of %x\n", value);
+ pr_warning("can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index f38a68747df0..ce20ae2e520e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -21,6 +21,7 @@
*
*/
+#include "pp_debug.h"
#include "smumgr.h"
#include "smu74.h"
#include "smu_ucode_xfer_vi.h"
@@ -36,7 +37,6 @@
#include "bif/bif_5_0_sh_mask.h"
#include "polaris10_pwrvirus.h"
#include "ppatomctrl.h"
-#include "pp_debug.h"
#include "cgs_common.h"
#include "polaris10_smc.h"
#include "smu7_ppsmc.h"
@@ -84,7 +84,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
break;
default:
- printk("Table Exit with Invalid Command!");
+ pr_info("Table Exit with Invalid Command!");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
result = -1;
break;
@@ -102,7 +102,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
if (0 != smu_data->avfs.avfs_btc_param) {
if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
- printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
+ pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
}
@@ -189,7 +189,7 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
return -1);
if (smu_data->avfs.avfs_btc_param > 1) {
- printk("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
+ pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
@@ -208,7 +208,7 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
break;
default:
- printk("[AVFS] Something is broken. See log!");
+ pr_info("[AVFS] Something is broken. See log!");
break;
}
@@ -328,6 +328,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
/* If failed, try with different security Key. */
if (result != 0) {
smu_data->smu7_data.security_hard_key ^= 1;
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
result = polaris10_start_smu_in_protection_mode(smumgr);
}
}
@@ -363,9 +364,15 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
static int polaris10_smu_init(struct pp_smumgr *smumgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data;
int i;
+ smu_data = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
+ if (smu_data == NULL)
+ return -ENOMEM;
+
+ smumgr->backend = smu_data;
+
if (smu7_init(smumgr))
return -EINVAL;
@@ -380,7 +387,7 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
return 0;
}
-static const struct pp_smumgr_func polaris10_smu_funcs = {
+const struct pp_smumgr_func polaris10_smu_funcs = {
.smu_init = polaris10_smu_init,
.smu_fini = smu7_smu_fini,
.start_smu = polaris10_start_smu,
@@ -403,18 +410,3 @@ static const struct pp_smumgr_func polaris10_smu_funcs = {
.get_mac_definition = polaris10_get_mac_definition,
.is_dpm_running = polaris10_is_dpm_running,
};
-
-int polaris10_smum_init(struct pp_smumgr *smumgr)
-{
- struct polaris10_smumgr *polaris10_smu = NULL;
-
- polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
-
- if (polaris10_smu == NULL)
- return -EINVAL;
-
- smumgr->backend = polaris10_smu;
- smumgr->smumgr_funcs = &polaris10_smu_funcs;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index f49b5487b951..35ac27681415 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -22,12 +22,12 @@
*/
+#include "pp_debug.h"
#include "smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "ppatomctrl.h"
-#include "pp_debug.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
@@ -175,7 +175,7 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
- printk("\n failed to send pre message %x ret is %d \n", msg, ret);
+ pr_info("\n failed to send pre message %x ret is %d \n", msg, ret);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
@@ -184,7 +184,7 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
- printk("\n failed to send message %x ret is %d \n", msg, ret);
+ pr_info("\n failed to send message %x ret is %d \n", msg, ret);
return 0;
}
@@ -225,7 +225,7 @@ int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
- printk("Failed to send Message.\n");
+ pr_info("Failed to send Message.\n");
return 0;
}
@@ -347,7 +347,7 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
result = UCODE_ID_RLC_G_MASK;
break;
default:
- printk("UCode type is out of range! \n");
+ pr_info("UCode type is out of range! \n");
result = 0;
}
@@ -396,7 +396,7 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
struct SMU_DRAMData_TOC *toc;
if (!smumgr->reload_fw) {
- printk(KERN_INFO "[ powerplay ] skip reloading...\n");
+ pr_info("skip reloading...\n");
return 0;
}
@@ -474,7 +474,7 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
- printk(KERN_ERR "Fail to Request SMU Load uCode");
+ pr_err("Fail to Request SMU Load uCode");
return result;
}
@@ -533,6 +533,8 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
cgs_get_firmware_info(smumgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
+ smumgr->is_kicker = info.is_kicker;
+
result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 6e618aa20719..c0956a4207a9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -22,6 +22,7 @@
*/
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <drm/amdgpu_drm.h>
#include "pp_instance.h"
@@ -29,44 +30,57 @@
#include "cgs_common.h"
#include "linux/delay.h"
-
-int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
+MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+
+
+int smum_early_init(struct pp_instance *handle)
{
struct pp_smumgr *smumgr;
- if ((handle == NULL) || (pp_init == NULL))
+ if (handle == NULL)
return -EINVAL;
smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
if (smumgr == NULL)
return -ENOMEM;
- smumgr->device = pp_init->device;
- smumgr->chip_family = pp_init->chip_family;
- smumgr->chip_id = pp_init->chip_id;
+ smumgr->device = handle->device;
+ smumgr->chip_family = handle->chip_family;
+ smumgr->chip_id = handle->chip_id;
smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
smumgr->reload_fw = 1;
handle->smu_mgr = smumgr;
switch (smumgr->chip_family) {
case AMDGPU_FAMILY_CZ:
- cz_smum_init(smumgr);
+ smumgr->smumgr_funcs = &cz_smu_funcs;
break;
case AMDGPU_FAMILY_VI:
switch (smumgr->chip_id) {
case CHIP_TOPAZ:
- iceland_smum_init(smumgr);
+ smumgr->smumgr_funcs = &iceland_smu_funcs;
break;
case CHIP_TONGA:
- tonga_smum_init(smumgr);
+ smumgr->smumgr_funcs = &tonga_smu_funcs;
break;
case CHIP_FIJI:
- fiji_smum_init(smumgr);
+ smumgr->smumgr_funcs = &fiji_smu_funcs;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
- polaris10_smum_init(smumgr);
+ smumgr->smumgr_funcs = &polaris10_smu_funcs;
break;
default:
return -EINVAL;
@@ -80,13 +94,6 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
return 0;
}
-int smum_fini(struct pp_smumgr *smumgr)
-{
- kfree(smumgr->device);
- kfree(smumgr);
- return 0;
-}
-
int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index 2e1493ce1bb5..331b0aba4a13 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -21,13 +21,13 @@
*
*/
+#include "pp_debug.h"
#include "tonga_smc.h"
#include "smu7_dyn_defaults.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
-#include "pp_debug.h"
#include "cgs_common.h"
#include "atombios.h"
#include "tonga_smumgr.h"
@@ -656,7 +656,7 @@ int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
}
} else {
if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
- printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !");
+ pr_err("Pcie Dpm Enablemask is 0 !");
while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
@@ -1503,7 +1503,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
if (result != 0) {
smu_data->smc_state_table.GraphicsBootLevel = 0;
- printk(KERN_ERR "[powerplay] VBIOS did not find boot engine "
+ pr_err("[powerplay] VBIOS did not find boot engine "
"clock value in dependency table. "
"Using Graphics DPM level 0 !");
result = 0;
@@ -1515,7 +1515,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
if (result != 0) {
smu_data->smc_state_table.MemoryBootLevel = 0;
- printk(KERN_ERR "[powerplay] VBIOS did not find boot "
+ pr_err("[powerplay] VBIOS did not find boot "
"engine clock value in dependency table."
"Using Memory DPM level 0 !");
result = 0;
@@ -1739,7 +1739,7 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
config = VR_SVI2_PLANE_2;
table->VRConfig |= config;
} else {
- printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should "
+ pr_err("VDDC and VDDGFX should "
"be both on SVI2 control in splitted mode !\n");
}
} else {
@@ -1752,7 +1752,7 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
config = VR_SVI2_PLANE_1;
table->VRConfig |= config;
} else {
- printk(KERN_ERR "[ powerplay ] VDDC should be on "
+ pr_err("VDDC should be on "
"SVI2 control in merged mode !\n");
}
}
@@ -2657,7 +2657,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
+ pr_warning("can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2681,7 +2681,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
case SMU_MAX_LEVELS_MVDD:
return SMU72_MAX_LEVELS_MVDD;
}
- printk(KERN_WARNING "can't get the mac value %x\n", value);
+ pr_warning("can't get the mac value %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index eff9a232e72e..a7d55366f2d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -27,7 +28,6 @@
#include "smumgr.h"
#include "tonga_smumgr.h"
-#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "tonga_ppsmc.h"
#include "smu/smu_7_1_2_d.h"
@@ -84,7 +84,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
/* Check pass/failed indicator */
if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) {
- printk(KERN_ERR "[ powerplay ] SMU Firmware start failed\n");
+ pr_err("SMU Firmware start failed\n");
return -EINVAL;
}
@@ -169,20 +169,25 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
*/
static int tonga_smu_init(struct pp_smumgr *smumgr)
{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ struct tonga_smumgr *tonga_priv = NULL;
+ int i;
+
+ tonga_priv = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL);
+ if (tonga_priv == NULL)
+ return -ENOMEM;
- int i;
+ smumgr->backend = tonga_priv;
if (smu7_init(smumgr))
return -EINVAL;
for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
- smu_data->activity_target[i] = 30;
+ tonga_priv->activity_target[i] = 30;
return 0;
}
-static const struct pp_smumgr_func tonga_smu_funcs = {
+const struct pp_smumgr_func tonga_smu_funcs = {
.smu_init = &tonga_smu_init,
.smu_fini = &smu7_smu_fini,
.start_smu = &tonga_start_smu,
@@ -205,18 +210,3 @@ static const struct pp_smumgr_func tonga_smu_funcs = {
.initialize_mc_reg_table = tonga_initialize_mc_reg_table,
.is_dpm_running = tonga_is_dpm_running,
};
-
-int tonga_smum_init(struct pp_smumgr *smumgr)
-{
- struct tonga_smumgr *tonga_smu = NULL;
-
- tonga_smu = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL);
-
- if (tonga_smu == NULL)
- return -ENOMEM;
-
- smumgr->backend = tonga_smu;
- smumgr->smumgr_funcs = &tonga_smu_funcs;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 7130b044b004..ad9a95916f1f 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -35,7 +35,8 @@ static struct simplefb_format supported_formats[] = {
static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- uint32_t pixel_format = crtc->primary->state->fb->pixel_format;
+ const struct drm_framebuffer *fb = crtc->primary->state->fb;
+ uint32_t pixel_format = fb->format->format;
struct simplefb_format *format = NULL;
int i;
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 0b6eaa49a1db..8d8344ed655e 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -135,8 +135,7 @@ static int arcpgu_load(struct drm_device *drm)
drm_kms_helper_poll_init(drm);
arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
- drm->mode_config.num_crtc,
- drm->mode_config.num_connector);
+ drm->mode_config.num_connector);
if (IS_ERR(arcpgu->fbdev)) {
ret = PTR_ERR(arcpgu->fbdev);
arcpgu->fbdev = NULL;
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index b69c66b4897e..0ce7f398bcff 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -47,10 +47,7 @@ int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
return ret;
/* Link drm_bridge to encoder */
- bridge->encoder = encoder;
- encoder->bridge = bridge;
-
- ret = drm_bridge_attach(drm, bridge);
+ ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret)
drm_encoder_cleanup(encoder);
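
In this series drm_bridge_attach() takes the encoder and an optional previous bridge and performs the encoder/bridge linking itself, so drivers drop the manual bridge->encoder and encoder->bridge assignments seen in the removed lines. A sketch of a caller against that signature; the wrapper name is illustrative and this builds only in-tree against 4.11-era DRM headers:

#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

/* Attach a bridge as the first element of an encoder's chain. */
static int example_attach_bridge(struct drm_encoder *encoder,
				 struct drm_bridge *bridge)
{
	/*
	 * NULL: no previous bridge, so this bridge hangs directly off
	 * the encoder; the helper now sets bridge->encoder itself.
	 */
	return drm_bridge_attach(encoder, bridge, NULL);
}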
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 7d4e5aa77195..20ebfb4fbdfa 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -60,11 +60,12 @@ static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
{
unsigned int btpp;
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+ const struct drm_framebuffer *fb = crtc->primary->state->fb;
uint32_t pixel_format;
struct simplefb_format *format = NULL;
int i;
- pixel_format = crtc->primary->state->fb->pixel_format;
+ pixel_format = fb->format->format;
for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
if (supported_formats[i].fourcc == pixel_format)
@@ -220,27 +221,28 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct drm_framebuffer *fb = plane->state->fb;
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
u32 src_w, src_h, dest_w, dest_h;
dma_addr_t scanout_start;
- if (!plane->state->fb)
+ if (!fb)
return;
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
dest_w = plane->state->crtc_w;
dest_h = plane->state->crtc_h;
- gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
- scanout_start = gem->paddr + plane->state->fb->offsets[0] +
- plane->state->crtc_y * plane->state->fb->pitches[0] +
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ scanout_start = gem->paddr + fb->offsets[0] +
+ plane->state->crtc_y * fb->pitches[0] +
plane->state->crtc_x *
- drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
+ fb->format->cpp[0];
hdlcd = plane->dev->dev_private;
- hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
- hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]);
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, fb->pitches[0]);
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index e5f4f4a6546d..4ce4f970920b 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -255,12 +255,6 @@ static int hdlcd_debugfs_init(struct drm_minor *minor)
return drm_debugfs_create_files(hdlcd_debugfs_list,
ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor);
}
-
-static void hdlcd_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(hdlcd_debugfs_list,
- ARRAY_SIZE(hdlcd_debugfs_list), minor);
-}
#endif
static const struct file_operations fops = {
@@ -303,7 +297,6 @@ static struct drm_driver hdlcd_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = hdlcd_debugfs_init,
- .debugfs_cleanup = hdlcd_debugfs_cleanup,
#endif
.fops = &fops,
.name = "hdlcd",
@@ -356,7 +349,7 @@ static int hdlcd_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
drm->mode_config.num_connector);
if (IS_ERR(hdlcd->fbdev)) {
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 32f746e31379..8b0672d4aee9 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -22,7 +22,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
@@ -256,6 +255,60 @@ static const struct of_device_id malidp_drm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, malidp_drm_of_match);
+static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev,
+ const struct of_device_id *dev_id)
+{
+ u32 core_id;
+ const char *compatstr_dp500 = "arm,mali-dp500";
+ bool is_dp500;
+ bool dt_is_dp500;
+
+ /*
+ * The DP500 CORE_ID register is in a different location, so check it
+ * first. If the product id field matches, then this is DP500, otherwise
+ * check the DP550/650 CORE_ID register.
+ */
+ core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID);
+ /* Offset 0x18 will never read 0x500 on products other than DP500. */
+ is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500);
+ dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500,
+ sizeof(dev_id->compatible)) != NULL;
+ if (is_dp500 != dt_is_dp500) {
+ DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n",
+ dev_id->compatible, is_dp500 ? "is" : "is not");
+ return false;
+ } else if (!dt_is_dp500) {
+ u16 product_id;
+ char buf[32];
+
+ core_id = malidp_hw_read(hwdev,
+ MALIDP550_DC_BASE + MALIDP_DE_CORE_ID);
+ product_id = MALIDP_PRODUCT_ID(core_id);
+ snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id);
+ if (!strnstr(dev_id->compatible, buf,
+ sizeof(dev_id->compatible))) {
+ DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n",
+ dev_id->compatible, product_id);
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool malidp_has_sufficient_address_space(const struct resource *res,
+ const struct of_device_id *dev_id)
+{
+ resource_size_t res_size = resource_size(res);
+ const char *compatstr_dp500 = "arm,mali-dp500";
+
+ if (!strnstr(dev_id->compatible, compatstr_dp500,
+ sizeof(dev_id->compatible)))
+ return res_size >= MALIDP550_ADDR_SPACE_SIZE;
+ else if (res_size < MALIDP500_ADDR_SPACE_SIZE)
+ return false;
+ return true;
+}
+
#define MAX_OUTPUT_CHANNELS 3
static int malidp_bind(struct device *dev)
@@ -266,6 +319,7 @@ static int malidp_bind(struct device *dev)
struct malidp_drm *malidp;
struct malidp_hw_device *hwdev;
struct platform_device *pdev = to_platform_device(dev);
+ struct of_device_id const *dev_id;
/* number of lines for the R, G and B output */
u8 output_width[MAX_OUTPUT_CHANNELS];
int ret = 0, i;
@@ -286,7 +340,6 @@ static int malidp_bind(struct device *dev)
memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
malidp->dev = hwdev;
- INIT_LIST_HEAD(&malidp->event_list);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hwdev->regs = devm_ioremap_resource(dev, res);
@@ -329,6 +382,23 @@ static int malidp_bind(struct device *dev)
clk_prepare_enable(hwdev->aclk);
clk_prepare_enable(hwdev->mclk);
+ dev_id = of_match_device(malidp_drm_of_match, dev);
+ if (!dev_id) {
+ ret = -EINVAL;
+ goto query_hw_fail;
+ }
+
+ if (!malidp_has_sufficient_address_space(res, dev_id)) {
+ DRM_ERROR("Insufficient address space in device-tree.\n");
+ ret = -EINVAL;
+ goto query_hw_fail;
+ }
+
+ if (!malidp_is_compatible_hw_id(hwdev, dev_id)) {
+ ret = -EINVAL;
+ goto query_hw_fail;
+ }
+
ret = hwdev->query_hw(hwdev);
if (ret) {
DRM_ERROR("Invalid HW configuration\n");
@@ -387,7 +457,7 @@ static int malidp_bind(struct device *dev)
drm_mode_config_reset(drm);
- malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ malidp->fbdev = drm_fbdev_cma_init(drm, 32,
drm->mode_config.num_connector);
if (IS_ERR(malidp->fbdev)) {
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 9fc8a2e405e4..dbc617c6e4ef 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -15,12 +15,12 @@
#include <linux/mutex.h>
#include <linux/wait.h>
+#include <drm/drmP.h>
#include "malidp_hw.h"
struct malidp_drm {
struct malidp_hw_device *dev;
struct drm_fbdev_cma *fbdev;
- struct list_head event_list;
struct drm_crtc crtc;
wait_queue_head_t wq;
atomic_t config_valid;
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 4bdf531f7844..488aedf5b58d 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -21,7 +21,7 @@
#include "malidp_drv.h"
#include "malidp_hw.h"
-static const struct malidp_input_format malidp500_de_formats[] = {
+static const struct malidp_format_id malidp500_de_formats[] = {
/* fourcc, layers supporting the format, internal id */
{ DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 },
{ DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 },
@@ -69,21 +69,21 @@ static const struct malidp_input_format malidp500_de_formats[] = {
{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
-static const struct malidp_input_format malidp550_de_formats[] = {
+static const struct malidp_format_id malidp550_de_formats[] = {
MALIDP_COMMON_FORMATS,
};
static const struct malidp_layer malidp500_layers[] = {
- { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE },
- { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE },
- { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE },
+ { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
+ { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, MALIDP_DE_LG_STRIDE },
+ { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, MALIDP_DE_LG_STRIDE },
};
static const struct malidp_layer malidp550_layers[] = {
- { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE },
- { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE },
- { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE },
- { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE },
+ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
+ { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
+ { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
};
#define MALIDP_DE_DEFAULT_PREFETCH_START 5
@@ -436,8 +436,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
.vsync_irq = MALIDP500_DE_IRQ_CONF_VALID,
},
- .input_formats = malidp500_de_formats,
- .n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+ .pixel_formats = malidp500_de_formats,
+ .n_pixel_formats = ARRAY_SIZE(malidp500_de_formats),
.bus_align_bytes = 8,
},
.query_hw = malidp500_query_hw,
@@ -447,6 +447,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.set_config_valid = malidp500_set_config_valid,
.modeset = malidp500_modeset,
.rotmem_required = malidp500_rotmem_required,
+ .features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
},
[MALIDP_550] = {
.map = {
@@ -469,8 +470,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
- .input_formats = malidp550_de_formats,
- .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .pixel_formats = malidp550_de_formats,
+ .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats),
.bus_align_bytes = 8,
},
.query_hw = malidp550_query_hw,
@@ -480,6 +481,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
.rotmem_required = malidp550_rotmem_required,
+ .features = 0,
},
[MALIDP_650] = {
.map = {
@@ -503,8 +505,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
- .input_formats = malidp550_de_formats,
- .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ .pixel_formats = malidp550_de_formats,
+ .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats),
.bus_align_bytes = 16,
},
.query_hw = malidp650_query_hw,
@@ -514,6 +516,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
.rotmem_required = malidp550_rotmem_required,
+ .features = 0,
},
};
@@ -522,10 +525,10 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
{
unsigned int i;
- for (i = 0; i < map->n_input_formats; i++) {
- if (((map->input_formats[i].layer & layer_id) == layer_id) &&
- (map->input_formats[i].format == format))
- return map->input_formats[i].id;
+ for (i = 0; i < map->n_pixel_formats; i++) {
+ if (((map->pixel_formats[i].layer & layer_id) == layer_id) &&
+ (map->pixel_formats[i].format == format))
+ return map->pixel_formats[i].id;
}
return MALIDP_INVALID_FORMAT_ID;
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 087e1202db3d..00974b59407d 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -35,7 +35,7 @@ enum {
DE_SMART = BIT(4),
};
-struct malidp_input_format {
+struct malidp_format_id {
u32 format; /* DRM fourcc */
u8 layer; /* bitmask of layers supporting it */
u8 id; /* used internally */
@@ -58,6 +58,7 @@ struct malidp_layer {
u16 id; /* layer ID */
u16 base; /* address offset for the register bank */
u16 ptr; /* address offset for the pointer register */
+ u16 stride_offset; /* Offset to the first stride register. */
};
/* regmap features */
@@ -85,14 +86,18 @@ struct malidp_hw_regmap {
const struct malidp_irq_map se_irq_map;
const struct malidp_irq_map dc_irq_map;
- /* list of supported input formats for each layer */
- const struct malidp_input_format *input_formats;
- const u8 n_input_formats;
+ /* list of supported pixel formats for each layer */
+ const struct malidp_format_id *pixel_formats;
+ const u8 n_pixel_formats;
/* pitch alignment requirement in bytes */
const u8 bus_align_bytes;
};
+/* device features */
+/* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
+#define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0)
+
struct malidp_hw_device {
const struct malidp_hw_regmap map;
void __iomem *regs;
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 63eec8f37cfc..414aada10fe5 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -11,6 +11,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -36,7 +37,6 @@
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE 0x010
#define MALIDP_LAYER_OFFSET 0x014
-#define MALIDP_LAYER_STRIDE 0x018
/*
* This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -67,13 +67,14 @@ drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state) {
- m_state = to_malidp_plane_state(plane->state);
- __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
- state->rotmem_size = m_state->rotmem_size;
- state->format = m_state->format;
- state->n_planes = m_state->n_planes;
- }
+ if (!state)
+ return NULL;
+
+ m_state = to_malidp_plane_state(plane->state);
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+ state->rotmem_size = m_state->rotmem_size;
+ state->format = m_state->format;
+ state->n_planes = m_state->n_planes;
return &state->base;
}
@@ -102,8 +103,10 @@ static int malidp_de_plane_check(struct drm_plane *plane,
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
+ struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb;
- int i;
+ struct drm_rect clip = { 0 };
+ int i, ret;
u32 src_w, src_h;
if (!state->crtc || !state->fb)
@@ -112,11 +115,11 @@ static int malidp_de_plane_check(struct drm_plane *plane,
fb = state->fb;
ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
- fb->pixel_format);
+ fb->format->format);
if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
- ms->n_planes = drm_format_num_planes(fb->pixel_format);
+ ms->n_planes = fb->format->num_planes;
for (i = 0; i < ms->n_planes; i++) {
if (!malidp_hw_pitch_valid(mp->hwdev, fb->pitches[i])) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
@@ -131,23 +134,42 @@ static int malidp_de_plane_check(struct drm_plane *plane,
if ((state->crtc_w > mp->hwdev->max_line_size) ||
(state->crtc_h > mp->hwdev->max_line_size) ||
(state->crtc_w < mp->hwdev->min_line_size) ||
- (state->crtc_h < mp->hwdev->min_line_size) ||
- (state->crtc_w != src_w) || (state->crtc_h != src_h))
+ (state->crtc_h < mp->hwdev->min_line_size))
+ return -EINVAL;
+
+ /*
+ * DP550/650 video layers can accept 3-plane formats only if
+ * fb->pitches[1] == fb->pitches[2] since they don't have a
+ * third plane stride register.
+ */
+ if (ms->n_planes == 3 &&
+ !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+ (state->fb->pitches[1] != state->fb->pitches[2]))
return -EINVAL;
/* packed RGB888 / BGR888 can't be rotated or flipped */
if (state->rotation != DRM_ROTATE_0 &&
- (state->fb->pixel_format == DRM_FORMAT_RGB888 ||
- state->fb->pixel_format == DRM_FORMAT_BGR888))
+ (fb->format->format == DRM_FORMAT_RGB888 ||
+ fb->format->format == DRM_FORMAT_BGR888))
return -EINVAL;
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ ret = drm_plane_helper_check_state(state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
+
ms->rotmem_size = 0;
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
state->crtc_w,
- state->fb->pixel_format);
+ fb->format->format);
if (val < 0)
return val;
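
A worked example of the pitch constraint introduced in this hunk: a 3-plane YUV420 framebuffer of width W typically carries pitches {W, W/2, W/2}, so fb->pitches[1] == fb->pitches[2] holds and DP550/650 can feed both chroma planes from their single second stride register, while a buffer padded so the two chroma pitches differ passes only on DP500, which has three stride registers. The predicate, as a sketch (the helper name is hypothetical):

	/* Sketch: does this fb satisfy the stride-register constraint?
	 * 'has_3_strides' stands for the MALIDP_DEVICE_LV_HAS_3_STRIDES bit. */
	static bool malidp_pitches_ok(const struct drm_framebuffer *fb,
				      int n_planes, bool has_3_strides)
	{
		if (n_planes == 3 && !has_3_strides)
			return fb->pitches[1] == fb->pitches[2];
		return true;
	}
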
@@ -157,6 +179,25 @@ static int malidp_de_plane_check(struct drm_plane *plane,
return 0;
}
+static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
+ int num_planes, unsigned int pitches[3])
+{
+ int i;
+ int num_strides = num_planes;
+
+ if (!mp->layer->stride_offset)
+ return;
+
+ if (num_planes == 3)
+ num_strides = (mp->hwdev->features &
+ MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
+
+ for (i = 0; i < num_strides; ++i)
+ malidp_hw_write(mp->hwdev, pitches[i],
+ mp->layer->base +
+ mp->layer->stride_offset + i * 4);
+}
+
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -174,13 +215,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
- if (plane->state->rotation & MALIDP_ROTATED_MASK) {
- dest_w = plane->state->crtc_h;
- dest_h = plane->state->crtc_w;
- } else {
- dest_w = plane->state->crtc_w;
- dest_h = plane->state->crtc_h;
- }
+ dest_w = plane->state->crtc_w;
+ dest_h = plane->state->crtc_h;
malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
@@ -189,11 +225,12 @@ static void malidp_de_plane_update(struct drm_plane *plane,
ptr = mp->layer->ptr + (i << 4);
obj = drm_fb_cma_get_gem_obj(plane->state->fb, i);
+ obj->paddr += plane->state->fb->offsets[i];
malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr);
malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4);
- malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i],
- mp->layer->base + MALIDP_LAYER_STRIDE);
}
+ malidp_de_set_plane_pitches(mp, ms->n_planes,
+ plane->state->fb->pitches);
malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
mp->layer->base + MALIDP_LAYER_SIZE);
@@ -211,11 +248,12 @@ static void malidp_de_plane_update(struct drm_plane *plane,
/* setup the rotation and axis flip bits */
if (plane->state->rotation & DRM_ROTATE_MASK)
- val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
+ val |= ilog2(plane->state->rotation & DRM_ROTATE_MASK) <<
+ LAYER_ROT_OFFSET;
if (plane->state->rotation & DRM_REFLECT_X)
- val |= LAYER_V_FLIP;
- if (plane->state->rotation & DRM_REFLECT_Y)
val |= LAYER_H_FLIP;
+ if (plane->state->rotation & DRM_REFLECT_Y)
+ val |= LAYER_V_FLIP;
/*
* always enable pixel alpha blending until we have a way to change
@@ -258,7 +296,7 @@ int malidp_de_planes_init(struct drm_device *drm)
u32 *formats;
int ret, i, j, n;
- formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL);
+ formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
if (!formats) {
ret = -ENOMEM;
goto cleanup;
@@ -274,9 +312,9 @@ int malidp_de_planes_init(struct drm_device *drm)
}
/* build the list of DRM supported formats based on the map */
- for (n = 0, j = 0; j < map->n_input_formats; j++) {
- if ((map->input_formats[j].layer & id) == id)
- formats[n++] = map->input_formats[j].format;
+ for (n = 0, j = 0; j < map->n_pixel_formats; j++) {
+ if ((map->pixel_formats[j].layer & id) == id)
+ formats[n++] = map->pixel_formats[j].format;
}
plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 73fecb38f955..aff6d4a84e99 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -81,6 +81,10 @@
#define MALIDP_DE_SYNC_WIDTH 0x8
#define MALIDP_DE_HV_ACTIVE 0xc
+/* Stride register offsets relative to Lx_BASE */
+#define MALIDP_DE_LG_STRIDE 0x18
+#define MALIDP_DE_LV_STRIDE0 0x18
+
/* macros to set values into registers */
#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
#define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16)
@@ -92,7 +96,10 @@
#define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0)
#define MALIDP_DE_V_ACTIVE(x) (((x) & 0x1fff) << 16)
+#define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16)
+
/* register offsets and bits specific to DP500 */
+#define MALIDP500_ADDR_SPACE_SIZE 0x01000
#define MALIDP500_DC_BASE 0x00000
#define MALIDP500_DC_CONTROL 0x0000c
#define MALIDP500_DC_CONFIG_REQ (1 << 17)
@@ -125,6 +132,7 @@
#define MALIDP500_CONFIG_ID 0x00fd4
/* register offsets and bits specific to DP550/DP650 */
+#define MALIDP550_ADDR_SPACE_SIZE 0x10000
#define MALIDP550_DE_CONTROL 0x00010
#define MALIDP550_DE_LINE_COUNTER 0x00014
#define MALIDP550_DE_AXI_CONTROL 0x00018
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index 15f3ecfb16f1..eafaeeb7b5b1 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -1,6 +1,6 @@
config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
- depends on DRM && HAVE_CLK && ARM
+ depends on DRM && HAVE_CLK && ARM && MMU
select DRM_KMS_HELPER
help
Support the "LCD" controllers found on the Marvell Armada 510
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index a18f156c8b66..64c0b4546fb2 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -4,3 +4,5 @@ armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
obj-$(CONFIG_DRM_ARMADA) := armada.o
+
+CFLAGS_armada_trace.o := -I$(src)
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 95cb3966b2ca..e62ee4498ce4 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -169,8 +169,7 @@ void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
int x, int y)
{
u32 addr = drm_fb_obj(fb)->dev_addr;
- u32 pixel_format = fb->pixel_format;
- int num_planes = drm_format_num_planes(pixel_format);
+ int num_planes = fb->format->num_planes;
int i;
if (num_planes > 3)
@@ -178,7 +177,7 @@ void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
for (i = 0; i < num_planes; i++)
addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * drm_format_plane_cpp(pixel_format, i);
+ x * fb->format->cpp[i];
for (; i < 3; i++)
addrs[i] = 0;
}
@@ -191,7 +190,7 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
unsigned i = 0;
DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
- pitch, x, y, fb->bits_per_pixel);
+ pitch, x, y, fb->format->cpp[0] * 8);
armada_drm_plane_calc_addrs(addrs, fb, x, y);
@@ -1036,7 +1035,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
int ret;
/* We don't support changing the pixel format */
- if (fb->pixel_format != crtc->primary->fb->pixel_format)
+ if (fb->format != crtc->primary->fb->format)
return -EINVAL;
work = kmalloc(sizeof(*work), GFP_KERNEL);
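
The armada changes above are one instance of a tree-wide rework in this pull: struct drm_framebuffer now carries a const struct drm_format_info *format pointer, so the old scalar fields and the drm_format_*() lookups collapse into direct field reads. The correspondence, as a sketch over any initialized framebuffer:

	/* Sketch: framebuffer format queries after the drm_format_info rework. */
	u32 fourcc     = fb->format->format;	 /* was fb->pixel_format */
	int num_planes = fb->format->num_planes; /* was drm_format_num_planes(fourcc) */
	int cpp0       = fb->format->cpp[0];	 /* was drm_format_plane_cpp(fourcc, 0) */
	int bpp        = fb->format->cpp[0] * 8; /* was fb->bits_per_pixel */
	int depth      = fb->format->depth;	 /* was fb->depth */
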
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index 90222e60d2d6..a8020cf9da2e 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -19,13 +19,13 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct armada_private *priv = dev->dev_private;
- int ret;
+ struct drm_printer p = drm_seq_file_printer(m);
mutex_lock(&priv->linear_lock);
- ret = drm_mm_dump_table(m, &priv->linear);
+ drm_mm_print(&priv->linear, &p);
mutex_unlock(&priv->linear_lock);
- return ret;
+ return 0;
}
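
The conversion above follows the drm_printer rework: drm_mm_dump_table(m, mm) is replaced by building a seq_file-backed printer and handing it to drm_mm_print(), which returns void, so the function now always returns 0. In isolation the pattern looks like this (a minimal sketch, assuming an allocator protected by a mutex):

	#include <linux/seq_file.h>
	#include <drm/drm_mm.h>
	#include <drm/drm_print.h>

	/* Sketch: dump a drm_mm allocator's state into a debugfs seq_file. */
	static int mm_dump_show(struct seq_file *m, struct drm_mm *mm,
				struct mutex *lock)
	{
		struct drm_printer p = drm_seq_file_printer(m);

		mutex_lock(lock);
		drm_mm_print(mm, &p);	/* was: ret = drm_mm_dump_table(m, mm) */
		mutex_unlock(lock);
		return 0;
	}
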
static int armada_debugfs_reg_show(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 07086b427c22..63f42d001f33 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -203,12 +203,6 @@ static int armada_drm_bind(struct device *dev)
armada_drm_debugfs_init(priv->drm.primary);
#endif
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- armada_drm_driver.name, armada_drm_driver.major,
- armada_drm_driver.minor, armada_drm_driver.patchlevel,
- armada_drm_driver.date, dev_name(dev),
- priv->drm.primary->index);
-
return 0;
err_poll:
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index f03c212b754d..2a7eb6817c36 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -81,7 +81,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
dfb->mod = config;
dfb->obj = obj;
- drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
+ drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index c5dc06a55883..0233e1dc33e1 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -89,11 +89,12 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
info->screen_base = ptr;
fbh->fb = &dfb->fb;
- drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
+ drm_fb_helper_fill_fix(info, dfb->fb.pitches[0],
+ dfb->fb.format->depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
- dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+ dfb->fb.width, dfb->fb.height, dfb->fb.format->cpp[0] * 8,
(unsigned long long)obj->phys_addr);
return 0;
@@ -136,7 +137,7 @@ int armada_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fbh, 1, 1);
+ ret = drm_fb_helper_init(dev, fbh, 1);
if (ret) {
DRM_ERROR("failed to initialize drm fb helper\n");
goto err_fb_helper;
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a293c8be232c..560d416deab2 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -148,8 +148,8 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
return -ENOSPC;
mutex_lock(&priv->linear_lock);
- ret = drm_mm_insert_node(&priv->linear, node, size, align,
- DRM_MM_SEARCH_DEFAULT);
+ ret = drm_mm_insert_node_generic(&priv->linear, node,
+ size, align, 0, 0);
mutex_unlock(&priv->linear_lock);
if (ret) {
kfree(node);
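
This follows the drm_mm rework in this pull: the DRM_MM_SEARCH_* flag argument of drm_mm_insert_node() is gone, and callers go through drm_mm_insert_node_generic() with explicit color and mode arguments, where passing 0 for both reproduces the old default search. The replacement call, in sketch form:

	/* Sketch: default-policy insertion under the reworked drm_mm API;
	 * the trailing 0, 0 mean "no color" and the default insert mode. */
	ret = drm_mm_insert_node_generic(&priv->linear, node, size, align, 0, 0);
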
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 6743615232f5..34cb73d0db77 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -186,9 +186,9 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
- pixel_format = fb->pixel_format;
+ pixel_format = fb->format->format;
hsub = drm_format_horz_chroma_subsampling(pixel_format);
- num_planes = drm_format_num_planes(pixel_format);
+ num_planes = fb->format->num_planes;
/*
* Annoyingly, shifting a YUYV-format image by one pixel
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 15f6ce7acb2a..9647e1f07088 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -1,6 +1,6 @@
config DRM_AST
tristate "AST server chips"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select DRM_TTM
select DRM_KMS_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 7abda94fc2cf..5a8fa1c85229 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,6 +28,7 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
+#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/ttm/ttm_bo_api.h>
@@ -122,7 +123,7 @@ struct ast_private {
};
int ast_driver_load(struct drm_device *dev, unsigned long flags);
-int ast_driver_unload(struct drm_device *dev);
+void ast_driver_unload(struct drm_device *dev);
struct ast_gem_object;
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index d6f5ec64c667..5d0ffab411a8 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -49,7 +49,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
struct drm_gem_object *obj;
struct ast_bo *bo;
int src_offset, dst_offset;
- int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+ int bpp = afbdev->afb.base.format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
@@ -237,7 +237,7 @@ static int astfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
info->screen_base = sysram;
@@ -315,8 +315,7 @@ int ast_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, &afbdev->helper,
- 1, 1);
+ ret = drm_fb_helper_init(dev, &afbdev->helper, 1);
if (ret)
goto free;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 533e762d036d..993909430736 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -323,7 +323,7 @@ int ast_framebuffer_init(struct drm_device *dev,
{
int ret;
- drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &ast_fb->base, mode_cmd);
ast_fb->obj = obj;
ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
if (ret) {
@@ -488,7 +488,7 @@ out_free:
return ret;
}
-int ast_driver_unload(struct drm_device *dev)
+void ast_driver_unload(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
@@ -501,7 +501,6 @@ int ast_driver_unload(struct drm_device *dev)
pci_iounmap(dev->pdev, ast->ioregs);
pci_iounmap(dev->pdev, ast->regs);
kfree(ast);
- return 0;
}
int ast_gem_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index e26c98f51eb4..606cb40f6c7c 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -79,12 +79,13 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
struct ast_vbios_mode_info *vbios_mode)
{
struct ast_private *ast = crtc->dev->dev_private;
+ const struct drm_framebuffer *fb = crtc->primary->fb;
u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
u32 hborder, vborder;
bool check_sync;
struct ast_vbios_enhtable *best = NULL;
- switch (crtc->primary->fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
color_index = VGAModeIndex - 1;
@@ -207,7 +208,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00);
if (vbios_mode->enh_table->flags & NewModeInfo) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92,
+ fb->format->cpp[0] * 8);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
@@ -369,10 +371,11 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod
static void ast_set_offset_reg(struct drm_crtc *crtc)
{
struct ast_private *ast = crtc->dev->dev_private;
+ const struct drm_framebuffer *fb = crtc->primary->fb;
u16 offset;
- offset = crtc->primary->fb->pitches[0] >> 3;
+ offset = fb->pitches[0] >> 3;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
@@ -395,9 +398,10 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
struct ast_vbios_mode_info *vbios_mode)
{
struct ast_private *ast = crtc->dev->dev_private;
+ const struct drm_framebuffer *fb = crtc->primary->fb;
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
- switch (crtc->primary->fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
jregA0 = 0x70;
jregA3 = 0x01;
@@ -452,7 +456,9 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo
static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
- switch (crtc->primary->fb->bits_per_pixel) {
+ const struct drm_framebuffer *fb = crtc->primary->fb;
+
+ switch (fb->format->cpp[0] * 8) {
case 8:
break;
default:
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 2a1368fac1d1..50c910efa13d 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -236,8 +236,6 @@ struct ttm_bo_driver ast_bo_driver = {
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int ast_mm_init(struct ast_private *ast)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index cbd0070265c9..427bdff425c2 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -431,15 +431,8 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
- if (dc->fbdev) {
+ if (dc->fbdev)
drm_fbdev_cma_hotplug_event(dc->fbdev);
- } else {
- dc->fbdev = drm_fbdev_cma_init(dev, 24,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
- if (IS_ERR(dc->fbdev))
- dc->fbdev = NULL;
- }
}
struct atmel_hlcdc_dc_commit {
@@ -653,10 +646,12 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
platform_set_drvdata(pdev, dev);
- drm_kms_helper_poll_init(dev);
+ dc->fbdev = drm_fbdev_cma_init(dev, 24,
+ dev->mode_config.num_connector);
+ if (IS_ERR(dc->fbdev))
+ dc->fbdev = NULL;
- /* force connectors detection */
- drm_helper_hpd_irq_event(dev);
+ drm_kms_helper_poll_init(dev);
return 0;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index 377e43cea9dd..63dfdbf34f80 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -446,7 +446,7 @@ void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
return;
if (fb)
- nplanes = drm_format_num_planes(fb->pixel_format);
+ nplanes = fb->format->num_planes;
if (nplanes > layer->max_planes)
return;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 6119b5085501..e7799b6ee829 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -230,9 +230,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
of_node_put(np);
if (bridge) {
- output->encoder.bridge = bridge;
- bridge->encoder = &output->encoder;
- ret = drm_bridge_attach(dev, bridge);
+ ret = drm_bridge_attach(&output->encoder, bridge, NULL);
if (!ret)
return 0;
}
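
drm_bridge_attach() now takes the encoder and an optional preceding bridge, and performs the encoder->bridge / bridge->encoder wiring itself, which is why the two manual assignments above are dropped. The call-site migration, as a sketch (NULL meaning this bridge is first in the chain):

	/* Sketch: the old three-step attach collapsed into one call. */
	ret = drm_bridge_attach(&output->encoder, bridge, NULL);
	if (ret)
		return ret;	/* bridge was not attached */
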
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 246ed1e33d8a..bd2791c4b002 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -356,7 +356,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
ATMEL_HLCDC_LAYER_ITER;
- if (atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format))
+ if (atmel_hlcdc_format_embeds_alpha(state->base.fb->format->format))
cfg |= ATMEL_HLCDC_LAYER_LAEN;
else
cfg |= ATMEL_HLCDC_LAYER_GAEN |
@@ -386,13 +386,13 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
u32 cfg;
int ret;
- ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->pixel_format,
+ ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->format->format,
&cfg);
if (ret)
return;
- if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
- state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
+ if ((state->base.fb->format->format == DRM_FORMAT_YUV422 ||
+ state->base.fb->format->format == DRM_FORMAT_NV61) &&
drm_rotation_90_or_270(state->base.rotation))
cfg |= ATMEL_HLCDC_YUV422ROT;
@@ -405,7 +405,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
* Rotation optimization is not working on RGB888 (rotation is still
* working but without any optimization).
*/
- if (state->base.fb->pixel_format == DRM_FORMAT_RGB888)
+ if (state->base.fb->format->format == DRM_FORMAT_RGB888)
cfg = ATMEL_HLCDC_LAYER_DMA_ROTDIS;
else
cfg = 0;
@@ -514,7 +514,7 @@ atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s);
if (!ovl_s->fb ||
- atmel_hlcdc_format_embeds_alpha(ovl_s->fb->pixel_format) ||
+ atmel_hlcdc_format_embeds_alpha(ovl_s->fb->format->format) ||
ovl_state->alpha != 255)
continue;
@@ -621,7 +621,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
state->src_w >>= 16;
state->src_h >>= 16;
- state->nplanes = drm_format_num_planes(fb->pixel_format);
+ state->nplanes = fb->format->num_planes;
if (state->nplanes > ATMEL_HLCDC_MAX_PLANES)
return -EINVAL;
@@ -664,15 +664,15 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * state->src_h,
state->crtc_h);
- hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+ hsub = drm_format_horz_chroma_subsampling(fb->format->format);
+ vsub = drm_format_vert_chroma_subsampling(fb->format->format);
for (i = 0; i < state->nplanes; i++) {
unsigned int offset = 0;
int xdiv = i ? hsub : 1;
int ydiv = i ? vsub : 1;
- state->bpp[i] = drm_format_plane_cpp(fb->pixel_format, i);
+ state->bpp[i] = fb->format->cpp[i];
if (!state->bpp[i])
return -EINVAL;
@@ -741,7 +741,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) &&
(!layout->memsize ||
- atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format)))
+ atmel_hlcdc_format_embeds_alpha(state->base.fb->format->format)))
return -EINVAL;
if (state->crtc_x < 0 || state->crtc_y < 0)
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index f739763f47ce..bd2718015cdb 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -1,6 +1,6 @@
config DRM_BOCHS
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
help
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 32dfe418cc98..f626bab7f5e3 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -4,6 +4,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 15a293e65b31..aa342515ddf4 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -12,6 +12,10 @@
#include "bochs.h"
+static int bochs_modeset = -1;
+module_param_named(modeset, bochs_modeset, int, 0444);
+MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting");
+
static bool enable_fbdev = true;
module_param_named(fbdev, enable_fbdev, bool, 0444);
MODULE_PARM_DESC(fbdev, "register fbdev device");
@@ -19,7 +23,7 @@ MODULE_PARM_DESC(fbdev, "register fbdev device");
/* ---------------------------------------------------------------------- */
/* drm interface */
-static int bochs_unload(struct drm_device *dev)
+static void bochs_unload(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
@@ -29,7 +33,6 @@ static int bochs_unload(struct drm_device *dev)
bochs_hw_fini(dev);
kfree(bochs);
dev->dev_private = NULL;
- return 0;
}
static int bochs_load(struct drm_device *dev, unsigned long flags)
@@ -215,6 +218,12 @@ static struct pci_driver bochs_pci_driver = {
static int __init bochs_init(void)
{
+ if (vgacon_text_force() && bochs_modeset == -1)
+ return -EINVAL;
+
+ if (bochs_modeset == 0)
+ return -EINVAL;
+
return drm_pci_init(&bochs_driver, &bochs_pci_driver);
}
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index da790a1c302a..932a769637ef 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -123,7 +123,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT;
info->fbops = &bochsfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
sizes->fb_height);
@@ -169,8 +169,7 @@ int bochs_fbdev_init(struct bochs_device *bochs)
drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
&bochs_fb_helper_funcs);
- ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
- 1, 1);
+ ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 099a3c688c26..857755ac2d70 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -205,8 +205,6 @@ struct ttm_bo_driver bochs_bo_driver = {
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int bochs_mm_init(struct bochs_device *bochs)
@@ -484,7 +482,7 @@ int bochs_framebuffer_init(struct drm_device *dev,
{
int ret;
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
gfb->obj = obj;
ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 992d76ce02bb..fe18a5d2d84b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -12,6 +12,7 @@
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_mipi_dsi.h>
@@ -317,6 +318,8 @@ struct adv7511 {
bool edid_read;
wait_queue_head_t wq;
+ struct work_struct hpd_work;
+
struct drm_bridge bridge;
struct drm_connector connector;
@@ -329,6 +332,9 @@ struct adv7511 {
struct gpio_desc *gpio_pd;
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
+
/* ADV7533 DSI RX related params */
struct device_node *host_node;
struct mipi_dsi_device *dsi;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 8dba729f6ef9..f75ab6278113 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -325,7 +325,7 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
-static void adv7511_power_on(struct adv7511 *adv7511)
+static void __adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
@@ -338,7 +338,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
* Still, let's be safe and stick to the documentation.
*/
regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
- ADV7511_INT0_EDID_READY);
+ ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD);
regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
ADV7511_INT1_DDC_ERROR);
}
@@ -354,6 +354,11 @@ static void adv7511_power_on(struct adv7511 *adv7511)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
ADV7511_REG_POWER2_HPD_SRC_MASK,
ADV7511_REG_POWER2_HPD_SRC_NONE);
+}
+
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+ __adv7511_power_on(adv7511);
/*
* Most of the registers are reset during power down or when HPD is low.
@@ -362,21 +367,23 @@ static void adv7511_power_on(struct adv7511 *adv7511)
if (adv7511->type == ADV7533)
adv7533_dsi_power_on(adv7511);
-
adv7511->powered = true;
}
-static void adv7511_power_off(struct adv7511 *adv7511)
+static void __adv7511_power_off(struct adv7511 *adv7511)
{
/* TODO: setup additional power down modes */
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
regcache_mark_dirty(adv7511->regmap);
+}
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+ __adv7511_power_off(adv7511);
if (adv7511->type == ADV7533)
adv7533_dsi_power_off(adv7511);
-
adv7511->powered = false;
}
@@ -402,6 +409,27 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
+static void adv7511_hpd_work(struct work_struct *work)
+{
+ struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+ enum drm_connector_status status;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ status = connector_status_disconnected;
+ else if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ if (adv7511->connector.status != status) {
+ adv7511->connector.status = status;
+ drm_kms_helper_hotplug_event(adv7511->connector.dev);
+ }
+}
+
static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
@@ -419,7 +447,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
- drm_helper_hpd_irq_event(adv7511->connector.dev);
+ schedule_work(&adv7511->hpd_work);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
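
The hunk above is the IRQ half of the new hotplug handling: instead of calling drm_helper_hpd_irq_event() directly from the interrupt path, the handler only schedules hpd_work, and the worker added earlier re-reads ADV7511_REG_STATUS and raises drm_kms_helper_hotplug_event() only when the connector status actually changed. The deferral pattern, reduced to a sketch:

	/* Sketch: set up once at probe time... */
	INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);

	/* ...then, from the (threaded) IRQ path, punt to process context: */
	schedule_work(&adv7511->hpd_work);
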
@@ -546,23 +574,20 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- if (adv7511->i2c_main->irq) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
- ADV7511_INT1_DDC_ERROR);
- }
- adv7511->current_edid_segment = -1;
+ unsigned int edid_i2c_addr =
+ (adv7511->i2c_main->addr << 1) + 4;
+
+ __adv7511_power_on(adv7511);
+
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
if (!adv7511->powered)
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN,
- ADV7511_POWER_POWER_DOWN);
+ __adv7511_power_off(adv7511);
kfree(adv7511->edid);
adv7511->edid = edid;
@@ -825,6 +850,10 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
if (adv->type == ADV7533)
ret = adv7533_attach_dsi(adv);
+ if (adv->i2c_main->irq)
+ regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_HPD);
+
return ret;
}
@@ -839,6 +868,58 @@ static struct drm_bridge_funcs adv7511_bridge_funcs = {
* Probe & remove
*/
+static const char * const adv7511_supply_names[] = {
+ "avdd",
+ "dvdd",
+ "pvdd",
+ "bgvdd",
+ "dvdd-3v",
+};
+
+static const char * const adv7533_supply_names[] = {
+ "avdd",
+ "dvdd",
+ "pvdd",
+ "a2vdd",
+ "v3p3",
+ "v1p2",
+};
+
+static int adv7511_init_regulators(struct adv7511 *adv)
+{
+ struct device *dev = &adv->i2c_main->dev;
+ const char * const *supply_names;
+ unsigned int i;
+ int ret;
+
+ if (adv->type == ADV7511) {
+ supply_names = adv7511_supply_names;
+ adv->num_supplies = ARRAY_SIZE(adv7511_supply_names);
+ } else {
+ supply_names = adv7533_supply_names;
+ adv->num_supplies = ARRAY_SIZE(adv7533_supply_names);
+ }
+
+ adv->supplies = devm_kcalloc(dev, adv->num_supplies,
+ sizeof(*adv->supplies), GFP_KERNEL);
+ if (!adv->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < adv->num_supplies; i++)
+ adv->supplies[i].supply = supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, adv->num_supplies, adv->supplies);
+ if (ret)
+ return ret;
+
+ return regulator_bulk_enable(adv->num_supplies, adv->supplies);
+}
+
+static void adv7511_uninit_regulators(struct adv7511 *adv)
+{
+ regulator_bulk_disable(adv->num_supplies, adv->supplies);
+}
+
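
Because the supplies are switched on with a plain regulator_bulk_enable() rather than a devm-managed enable, every probe failure after that point has to branch to the new uninit_regulators label, as the error-path changes further down show. In outline (a sketch; later_probe_step() is a hypothetical stand-in):

	/* Sketch: any failure after the bulk enable must unwind it. */
	ret = adv7511_init_regulators(adv7511);
	if (ret)
		return ret;			/* nothing enabled yet */
	ret = later_probe_step(adv7511);	/* hypothetical */
	if (ret)
		goto uninit_regulators;
	return 0;

uninit_regulators:
	adv7511_uninit_regulators(adv7511);
	return ret;
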
static int adv7511_parse_dt(struct device_node *np,
struct adv7511_link_config *config)
{
@@ -939,6 +1020,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511)
return -ENOMEM;
+ adv7511->i2c_main = i2c;
adv7511->powered = false;
adv7511->status = connector_status_disconnected;
@@ -956,13 +1038,21 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (ret)
return ret;
+ ret = adv7511_init_regulators(adv7511);
+ if (ret) {
+ dev_err(dev, "failed to init regulators\n");
+ return ret;
+ }
+
/*
* The power down GPIO is optional. If present, toggle it from active to
* inactive to wake up the encoder.
*/
adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
- if (IS_ERR(adv7511->gpio_pd))
- return PTR_ERR(adv7511->gpio_pd);
+ if (IS_ERR(adv7511->gpio_pd)) {
+ ret = PTR_ERR(adv7511->gpio_pd);
+ goto uninit_regulators;
+ }
if (adv7511->gpio_pd) {
mdelay(5);
@@ -970,12 +1060,14 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
}
adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
- if (IS_ERR(adv7511->regmap))
- return PTR_ERR(adv7511->regmap);
+ if (IS_ERR(adv7511->regmap)) {
+ ret = PTR_ERR(adv7511->regmap);
+ goto uninit_regulators;
+ }
ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
if (ret)
- return ret;
+ goto uninit_regulators;
dev_dbg(dev, "Rev. %d\n", val);
if (adv7511->type == ADV7511)
@@ -985,7 +1077,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
else
ret = adv7533_patch_registers(adv7511);
if (ret)
- return ret;
+ goto uninit_regulators;
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
@@ -995,10 +1087,11 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511_packet_disable(adv7511, 0xffff);
- adv7511->i2c_main = i2c;
adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
- if (!adv7511->i2c_edid)
- return -ENOMEM;
+ if (!adv7511->i2c_edid) {
+ ret = -ENOMEM;
+ goto uninit_regulators;
+ }
if (adv7511->type == ADV7533) {
ret = adv7533_init_cec(adv7511);
@@ -1006,6 +1099,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_i2c_unregister_edid;
}
+ INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -1045,6 +1140,8 @@ err_unregister_cec:
adv7533_uninit_cec(adv7511);
err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
+uninit_regulators:
+ adv7511_uninit_regulators(adv7511);
return ret;
}
@@ -1058,6 +1155,8 @@ static int adv7511_remove(struct i2c_client *i2c)
adv7533_uninit_cec(adv7511);
}
+ adv7511_uninit_regulators(adv7511);
+
drm_bridge_remove(&adv7511->bridge);
adv7511_audio_exit(adv7511);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 18eefdcbf1ba..e7cd1056ff2d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -133,6 +133,7 @@ int analogix_dp_disable_psr(struct device *dev)
{
struct analogix_dp_device *dp = dev_get_drvdata(dev);
struct edp_vsc_psr psr_vsc;
+ int ret;
if (!dp->psr_support)
return 0;
@@ -147,6 +148,10 @@ int analogix_dp_disable_psr(struct device *dev)
psr_vsc.DB0 = 0;
psr_vsc.DB1 = 0;
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ if (ret != 1)
+ dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret);
+
analogix_dp_send_psr_spd(dp, &psr_vsc);
return 0;
}
@@ -1227,12 +1232,10 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev,
dp->bridge = bridge;
- dp->encoder->bridge = bridge;
bridge->driver_private = dp;
- bridge->encoder = dp->encoder;
bridge->funcs = &analogix_dp_bridge_funcs;
- ret = drm_bridge_attach(drm_dev, bridge);
+ ret = drm_bridge_attach(dp->encoder, bridge, NULL);
if (ret) {
DRM_ERROR("failed to attach drm bridge\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index e5706981c934..86e9f9c7b59c 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -237,6 +237,7 @@ static int dumb_vga_remove(struct platform_device *pdev)
static const struct of_device_id dumb_vga_match[] = {
{ .compatible = "dumb-vga-dac" },
+ { .compatible = "ti,ths8135" },
{},
};
MODULE_DEVICE_TABLE(of, dumb_vga_match);
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index 235ce7d1583d..9a9ec27d9e28 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -113,13 +113,20 @@ struct dw_hdmi_i2c {
bool is_regaddr;
};
+struct dw_hdmi_phy_data {
+ enum dw_hdmi_phy_type type;
+ const char *name;
+ bool has_svsret;
+};
+
struct dw_hdmi {
struct drm_connector connector;
- struct drm_encoder *encoder;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
- struct platform_device *audio;
enum dw_hdmi_devtype dev_type;
+ unsigned int version;
+
+ struct platform_device *audio;
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
@@ -133,7 +140,9 @@ struct dw_hdmi {
u8 edid[HDMI_EDID_LEN];
bool cable_plugin;
+ const struct dw_hdmi_phy_data *phy;
bool phy_enabled;
+
struct drm_display_mode previous_mode;
struct i2c_adapter *ddc;
@@ -868,7 +877,7 @@ static bool hdmi_phy_wait_i2c_done(struct dw_hdmi *hdmi, int msec)
return true;
}
-static void __hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
+static void hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
unsigned char addr)
{
hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0);
@@ -882,13 +891,6 @@ static void __hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
hdmi_phy_wait_i2c_done(hdmi, 1000);
}
-static int hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
- unsigned char addr)
-{
- __hdmi_phy_i2c_write(hdmi, data, addr);
- return 0;
-}
-
static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable)
{
hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0,
@@ -903,11 +905,11 @@ static void dw_hdmi_phy_enable_tmds(struct dw_hdmi *hdmi, u8 enable)
HDMI_PHY_CONF0_ENTMDS_MASK);
}
-static void dw_hdmi_phy_enable_spare(struct dw_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_enable_svsret(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
- HDMI_PHY_CONF0_SPARECTRL_OFFSET,
- HDMI_PHY_CONF0_SPARECTRL_MASK);
+ HDMI_PHY_CONF0_SVSRET_OFFSET,
+ HDMI_PHY_CONF0_SVSRET_MASK);
}
static void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable)
@@ -938,34 +940,14 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable)
HDMI_PHY_CONF0_SELDIPIF_MASK);
}
-static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
- unsigned char res, int cscon)
+static int hdmi_phy_configure(struct dw_hdmi *hdmi, int cscon)
{
- unsigned res_idx;
u8 val, msec;
const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg;
const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr;
const struct dw_hdmi_phy_config *phy_config = pdata->phy_config;
- if (prep)
- return -EINVAL;
-
- switch (res) {
- case 0: /* color resolution 0 is 8 bit colour depth */
- case 8:
- res_idx = DW_HDMI_RES_8;
- break;
- case 10:
- res_idx = DW_HDMI_RES_10;
- break;
- case 12:
- res_idx = DW_HDMI_RES_12;
- break;
- default:
- return -EINVAL;
- }
-
/* PLL/MPLL Cfg - always match on final entry */
for (; mpll_config->mpixelclock != ~0UL; mpll_config++)
if (hdmi->hdmi_data.video_mode.mpixelclock <=
@@ -1004,9 +986,13 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
/* gen2 pddq */
dw_hdmi_phy_gen2_pddq(hdmi, 1);
- /* PHY reset */
- hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
- hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ);
+ /* Leave low power consumption mode by asserting SVSRET. */
+ if (hdmi->phy->has_svsret)
+ dw_hdmi_phy_enable_svsret(hdmi, 1);
+
+ /* PHY reset. The reset signal is active high on Gen2 PHYs. */
+ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ);
+ hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ);
hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
@@ -1015,21 +1001,26 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
HDMI_PHY_I2CM_SLAVE_ADDR);
hdmi_phy_test_clear(hdmi, 0);
- hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].cpce, 0x06);
- hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].gmp, 0x15);
-
- /* CURRCTRL */
- hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[res_idx], 0x10);
+ hdmi_phy_i2c_write(hdmi, mpll_config->res[0].cpce,
+ HDMI_3D_TX_PHY_CPCE_CTRL);
+ hdmi_phy_i2c_write(hdmi, mpll_config->res[0].gmp,
+ HDMI_3D_TX_PHY_GMPCTRL);
+ hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[0],
+ HDMI_3D_TX_PHY_CURRCTRL);
- hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
- hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
+ hdmi_phy_i2c_write(hdmi, 0, HDMI_3D_TX_PHY_PLLPHBYCTRL);
+ hdmi_phy_i2c_write(hdmi, HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_FB_CLK,
+ HDMI_3D_TX_PHY_MSM_CTRL);
- hdmi_phy_i2c_write(hdmi, phy_config->term, 0x19); /* TXTERM */
- hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, 0x09); /* CKSYMTXCTRL */
- hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, 0x0E); /* VLEVCTRL */
+ hdmi_phy_i2c_write(hdmi, phy_config->term, HDMI_3D_TX_PHY_TXTERM);
+ hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr,
+ HDMI_3D_TX_PHY_CKSYMTXCTRL);
+ hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr,
+ HDMI_3D_TX_PHY_VLEVCTRL);
- /* REMOVE CLK TERM */
- hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
+ /* Override and disable clock termination. */
+ hdmi_phy_i2c_write(hdmi, HDMI_3D_TX_PHY_CKCALCTRL_OVERRIDE,
+ HDMI_3D_TX_PHY_CKCALCTRL);
dw_hdmi_phy_enable_powerdown(hdmi, false);
@@ -1041,10 +1032,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
dw_hdmi_phy_gen2_txpwron(hdmi, 1);
dw_hdmi_phy_gen2_pddq(hdmi, 0);
- if (hdmi->dev_type == RK3288_HDMI)
- dw_hdmi_phy_enable_spare(hdmi, 1);
-
- /*Wait for PHY PLL lock */
+ /* Wait for PHY PLL lock */
msec = 5;
do {
val = hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
@@ -1079,7 +1067,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi)
dw_hdmi_phy_enable_powerdown(hdmi, true);
/* Enable CSC */
- ret = hdmi_phy_configure(hdmi, 0, 8, cscon);
+ ret = hdmi_phy_configure(hdmi, cscon);
if (ret)
return ret;
}
@@ -1351,19 +1339,38 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi)
/* Workaround to clear the overflow condition */
static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
{
- int count;
+ unsigned int count;
+ unsigned int i;
u8 val;
- /* TMDS software reset */
- hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ);
+ /*
+ * Under some circumstances the Frame Composer arithmetic unit can miss
+ * an FC register write due to being busy processing the previous one.
+ * The issue can be worked around by issuing a TMDS software reset and
+ * then writing one of the FC registers several times.
+ *
+ * The number of iterations matters and depends on the HDMI TX revision
+ * (and possibly on the platform). So far only i.MX6Q (v1.30a) and
+ * i.MX6DL (v1.31a) have been identified as needing the workaround, with
+ * 4 and 1 iterations respectively.
+ */
- val = hdmi_readb(hdmi, HDMI_FC_INVIDCONF);
- if (hdmi->dev_type == IMX6DL_HDMI) {
- hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
+ switch (hdmi->version) {
+ case 0x130a:
+ count = 4;
+ break;
+ case 0x131a:
+ count = 1;
+ break;
+ default:
return;
}
- for (count = 0; count < 4; count++)
+ /* TMDS software reset */
+ hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ);
+
+ val = hdmi_readb(hdmi, HDMI_FC_INVIDCONF);
+ for (i = 0; i < count; i++)
hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
}
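
The magic case values map onto the controller version word read later in this patch: hdmi->version is (HDMI_DESIGN_ID << 8) | HDMI_REVISION_ID, and the probe message prints it as v%x.%03x. A worked example tying the two together:

	/* Sketch: 0x130a decomposes as DESIGN_ID 0x13, REVISION_ID 0x0a
	 * and prints as "v1.30a" (i.MX6Q); 0x131a prints as "v1.31a"
	 * (i.MX6DL), matching the comment above. */
	u16 version = (0x13 << 8) | 0x0a;	/* = 0x130a */
	pr_info("v%x.%03x\n", version >> 12, version & 0xfff);
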
@@ -1586,42 +1593,6 @@ static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
}
-static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode)
-{
- struct dw_hdmi *hdmi = bridge->driver_private;
-
- mutex_lock(&hdmi->mutex);
-
- /* Store the display mode for plugin/DPMS poweron events */
- memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
-
- mutex_unlock(&hdmi->mutex);
-}
-
-static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
-{
- struct dw_hdmi *hdmi = bridge->driver_private;
-
- mutex_lock(&hdmi->mutex);
- hdmi->disabled = true;
- dw_hdmi_update_power(hdmi);
- dw_hdmi_update_phy_mask(hdmi);
- mutex_unlock(&hdmi->mutex);
-}
-
-static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
-{
- struct dw_hdmi *hdmi = bridge->driver_private;
-
- mutex_lock(&hdmi->mutex);
- hdmi->disabled = false;
- dw_hdmi_update_power(hdmi);
- dw_hdmi_update_phy_mask(hdmi);
- mutex_unlock(&hdmi->mutex);
-}
-
static enum drm_connector_status
dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1714,7 +1685,63 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs =
.best_encoder = drm_atomic_helper_best_encoder,
};
+static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_connector *connector = &hdmi->connector;
+
+ connector->interlace_allowed = 1;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_helper_add(connector, &dw_hdmi_connector_helper_funcs);
+
+ drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ mutex_lock(&hdmi->mutex);
+
+ /* Store the display mode for plugin/DPMS poweron events */
+ memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
+
+ mutex_unlock(&hdmi->mutex);
+}
+
+static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ mutex_lock(&hdmi->mutex);
+ hdmi->disabled = true;
+ dw_hdmi_update_power(hdmi);
+ dw_hdmi_update_phy_mask(hdmi);
+ mutex_unlock(&hdmi->mutex);
+}
+
+static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ mutex_lock(&hdmi->mutex);
+ hdmi->disabled = false;
+ dw_hdmi_update_power(hdmi);
+ dw_hdmi_update_phy_mask(hdmi);
+ mutex_unlock(&hdmi->mutex);
+}
+
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+ .attach = dw_hdmi_bridge_attach,
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
@@ -1816,7 +1843,8 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
dev_dbg(hdmi->dev, "EVENT=%s\n",
phy_int_pol & HDMI_PHY_HPD ? "plugin" : "plugout");
- drm_helper_hpd_irq_event(hdmi->bridge->dev);
+ if (hdmi->bridge.dev)
+ drm_helper_hpd_irq_event(hdmi->bridge.dev);
}
hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
@@ -1826,68 +1854,80 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
-{
- struct drm_encoder *encoder = hdmi->encoder;
- struct drm_bridge *bridge;
- int ret;
-
- bridge = devm_kzalloc(drm->dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- DRM_ERROR("Failed to allocate drm bridge\n");
- return -ENOMEM;
- }
-
- hdmi->bridge = bridge;
- bridge->driver_private = hdmi;
- bridge->funcs = &dw_hdmi_bridge_funcs;
- ret = drm_bridge_attach(drm, bridge);
- if (ret) {
- DRM_ERROR("Failed to initialize bridge with drm\n");
- return -EINVAL;
+static const struct dw_hdmi_phy_data dw_hdmi_phys[] = {
+ {
+ .type = DW_HDMI_PHY_DWC_HDMI_TX_PHY,
+ .name = "DWC HDMI TX PHY",
+ }, {
+ .type = DW_HDMI_PHY_DWC_MHL_PHY_HEAC,
+ .name = "DWC MHL PHY + HEAC PHY",
+ .has_svsret = true,
+ }, {
+ .type = DW_HDMI_PHY_DWC_MHL_PHY,
+ .name = "DWC MHL PHY",
+ .has_svsret = true,
+ }, {
+ .type = DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC,
+ .name = "DWC HDMI 3D TX PHY + HEAC PHY",
+ }, {
+ .type = DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY,
+ .name = "DWC HDMI 3D TX PHY",
+ }, {
+ .type = DW_HDMI_PHY_DWC_HDMI20_TX_PHY,
+ .name = "DWC HDMI 2.0 TX PHY",
+ .has_svsret = true,
}
+};
- encoder->bridge = bridge;
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi)
+{
+ unsigned int i;
+ u8 phy_type;
- drm_connector_helper_add(&hdmi->connector,
- &dw_hdmi_connector_helper_funcs);
+ phy_type = hdmi_readb(hdmi, HDMI_CONFIG2_ID);
- drm_connector_init(drm, &hdmi->connector,
- &dw_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ for (i = 0; i < ARRAY_SIZE(dw_hdmi_phys); ++i) {
+ if (dw_hdmi_phys[i].type == phy_type) {
+ hdmi->phy = &dw_hdmi_phys[i];
+ return 0;
+ }
+ }
- drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+ if (phy_type == DW_HDMI_PHY_VENDOR_PHY)
+ dev_err(hdmi->dev, "Unsupported vendor HDMI PHY\n");
+ else
+ dev_err(hdmi->dev, "Unsupported HDMI PHY type (%02x)\n",
+ phy_type);
- return 0;
+ return -ENODEV;
}
-int dw_hdmi_bind(struct device *dev, struct device *master,
- void *data, struct drm_encoder *encoder,
- struct resource *iores, int irq,
- const struct dw_hdmi_plat_data *plat_data)
+static struct dw_hdmi *
+__dw_hdmi_probe(struct platform_device *pdev,
+ const struct dw_hdmi_plat_data *plat_data)
{
- struct drm_device *drm = data;
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct platform_device_info pdevinfo;
struct device_node *ddc_node;
struct dw_hdmi *hdmi;
+ struct resource *iores;
+ int irq;
int ret;
u32 val = 1;
+ u8 prod_id0;
+ u8 prod_id1;
u8 config0;
- u8 config1;
+ u8 config3;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
- return -ENOMEM;
-
- hdmi->connector.interlace_allowed = 1;
+ return ERR_PTR(-ENOMEM);
hdmi->plat_data = plat_data;
hdmi->dev = dev;
hdmi->dev_type = plat_data->dev_type;
hdmi->sample_rate = 48000;
- hdmi->encoder = encoder;
hdmi->disabled = true;
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
@@ -1909,7 +1949,7 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
break;
default:
dev_err(dev, "reg-io-width must be 1 or 4\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
@@ -1918,13 +1958,14 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
of_node_put(ddc_node);
if (!hdmi->ddc) {
dev_dbg(hdmi->dev, "failed to read ddc node\n");
- return -EPROBE_DEFER;
+ return ERR_PTR(-EPROBE_DEFER);
}
} else {
dev_dbg(hdmi->dev, "no ddc property found\n");
}
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->regs = devm_ioremap_resource(dev, iores);
if (IS_ERR(hdmi->regs)) {
ret = PTR_ERR(hdmi->regs);
@@ -1958,15 +1999,36 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
}
/* Product and revision IDs */
- dev_info(dev,
- "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
- hdmi_readb(hdmi, HDMI_DESIGN_ID),
- hdmi_readb(hdmi, HDMI_REVISION_ID),
- hdmi_readb(hdmi, HDMI_PRODUCT_ID0),
- hdmi_readb(hdmi, HDMI_PRODUCT_ID1));
+ hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
+ | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
+ prod_id0 = hdmi_readb(hdmi, HDMI_PRODUCT_ID0);
+ prod_id1 = hdmi_readb(hdmi, HDMI_PRODUCT_ID1);
+
+ if (prod_id0 != HDMI_PRODUCT_ID0_HDMI_TX ||
+ (prod_id1 & ~HDMI_PRODUCT_ID1_HDCP) != HDMI_PRODUCT_ID1_HDMI_TX) {
+ dev_err(dev, "Unsupported HDMI controller (%04x:%02x:%02x)\n",
+ hdmi->version, prod_id0, prod_id1);
+ ret = -ENODEV;
+ goto err_iahb;
+ }
+
+ ret = dw_hdmi_detect_phy(hdmi);
+ if (ret < 0)
+ goto err_iahb;
+
+ dev_info(dev, "Detected HDMI TX controller v%x.%03x %s HDCP (%s)\n",
+ hdmi->version >> 12, hdmi->version & 0xfff,
+ prod_id1 & HDMI_PRODUCT_ID1_HDCP ? "with" : "without",
+ hdmi->phy->name);
initialize_hdmi_ih_mutes(hdmi);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_iahb;
+ }
+
ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq,
dw_hdmi_irq, IRQF_SHARED,
dev_name(dev), hdmi);
@@ -1996,11 +2058,13 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
HDMI_IH_PHY_STAT0);
- ret = dw_hdmi_fb_registered(hdmi);
- if (ret)
- goto err_iahb;
+ hdmi->bridge.driver_private = hdmi;
+ hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
+#ifdef CONFIG_OF
+ hdmi->bridge.of_node = pdev->dev.of_node;
+#endif
- ret = dw_hdmi_register(drm, hdmi);
+ ret = dw_hdmi_fb_registered(hdmi);
if (ret)
goto err_iahb;
@@ -2013,9 +2077,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
pdevinfo.id = PLATFORM_DEVID_AUTO;
config0 = hdmi_readb(hdmi, HDMI_CONFIG0_ID);
- config1 = hdmi_readb(hdmi, HDMI_CONFIG1_ID);
+ config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID);
- if (config1 & HDMI_CONFIG1_AHB) {
+ if (config3 & HDMI_CONFIG3_AHBAUDDMA) {
struct dw_hdmi_audio_data audio;
audio.phys = iores->start;
@@ -2047,9 +2111,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
if (hdmi->i2c)
dw_hdmi_i2c_init(hdmi);
- dev_set_drvdata(dev, hdmi);
+ platform_set_drvdata(pdev, hdmi);
- return 0;
+ return hdmi;
err_iahb:
if (hdmi->i2c) {
@@ -2063,14 +2127,11 @@ err_isfr:
err_res:
i2c_put_adapter(hdmi->ddc);
- return ret;
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dw_hdmi_bind);
-void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
+static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
{
- struct dw_hdmi *hdmi = dev_get_drvdata(dev);
-
if (hdmi->audio && !IS_ERR(hdmi->audio))
platform_device_unregister(hdmi->audio);
@@ -2085,6 +2146,70 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
else
i2c_put_adapter(hdmi->ddc);
}
+
+/* -----------------------------------------------------------------------------
+ * Probe/remove API, used by platforms based on the DRM bridge API.
+ */
+int dw_hdmi_probe(struct platform_device *pdev,
+ const struct dw_hdmi_plat_data *plat_data)
+{
+ struct dw_hdmi *hdmi;
+ int ret;
+
+ hdmi = __dw_hdmi_probe(pdev, plat_data);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+
+ ret = drm_bridge_add(&hdmi->bridge);
+ if (ret < 0) {
+ __dw_hdmi_remove(hdmi);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_probe);
+
+void dw_hdmi_remove(struct platform_device *pdev)
+{
+ struct dw_hdmi *hdmi = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&hdmi->bridge);
+
+ __dw_hdmi_remove(hdmi);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_remove);
+
+/* -----------------------------------------------------------------------------
+ * Bind/unbind API, used by platforms based on the component framework.
+ */
+int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
+ const struct dw_hdmi_plat_data *plat_data)
+{
+ struct dw_hdmi *hdmi;
+ int ret;
+
+ hdmi = __dw_hdmi_probe(pdev, plat_data);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL);
+ if (ret) {
+ dw_hdmi_remove(pdev);
+ DRM_ERROR("Failed to initialize bridge with drm\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_bind);
+
+void dw_hdmi_unbind(struct device *dev)
+{
+ struct dw_hdmi *hdmi = dev_get_drvdata(dev);
+
+ __dw_hdmi_remove(hdmi);
+}
EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h
index 55135bbd0c16..325b0b8ae639 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi.h
@@ -545,12 +545,24 @@
#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
enum {
+/* PRODUCT_ID0 field values */
+ HDMI_PRODUCT_ID0_HDMI_TX = 0xa0,
+
+/* PRODUCT_ID1 field values */
+ HDMI_PRODUCT_ID1_HDCP = 0xc0,
+ HDMI_PRODUCT_ID1_HDMI_RX = 0x02,
+ HDMI_PRODUCT_ID1_HDMI_TX = 0x01,
+
/* CONFIG0_ID field values */
HDMI_CONFIG0_I2S = 0x10,
/* CONFIG1_ID field values */
HDMI_CONFIG1_AHB = 0x01,
+/* CONFIG3_ID field values */
+ HDMI_CONFIG3_AHBAUDDMA = 0x02,
+ HDMI_CONFIG3_GPAUD = 0x01,
+
/* IH_FC_INT2 field values */
HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
@@ -847,8 +859,8 @@ enum {
HDMI_PHY_CONF0_PDZ_OFFSET = 7,
HDMI_PHY_CONF0_ENTMDS_MASK = 0x40,
HDMI_PHY_CONF0_ENTMDS_OFFSET = 6,
- HDMI_PHY_CONF0_SPARECTRL_MASK = 0x20,
- HDMI_PHY_CONF0_SPARECTRL_OFFSET = 5,
+ HDMI_PHY_CONF0_SVSRET_MASK = 0x20,
+ HDMI_PHY_CONF0_SVSRET_OFFSET = 5,
HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10,
HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4,
HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8,
@@ -977,8 +989,7 @@ enum {
HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS = 0x0,
/* MC_PHYRSTZ field values */
- HDMI_MC_PHYRSTZ_ASSERT = 0x0,
- HDMI_MC_PHYRSTZ_DEASSERT = 0x1,
+ HDMI_MC_PHYRSTZ_PHYRSTZ = 0x01,
/* MC_HEACPHY_RST field values */
HDMI_MC_HEACPHY_RST_ASSERT = 0x1,
@@ -1073,4 +1084,70 @@ enum {
HDMI_I2CM_CTLINT_ARB_MASK = 0x4,
};
+/*
+ * HDMI 3D TX PHY registers
+ */
+#define HDMI_3D_TX_PHY_PWRCTRL 0x00
+#define HDMI_3D_TX_PHY_SERDIVCTRL 0x01
+#define HDMI_3D_TX_PHY_SERCKCTRL 0x02
+#define HDMI_3D_TX_PHY_SERCKKILLCTRL 0x03
+#define HDMI_3D_TX_PHY_TXRESCTRL 0x04
+#define HDMI_3D_TX_PHY_CKCALCTRL 0x05
+#define HDMI_3D_TX_PHY_CPCE_CTRL 0x06
+#define HDMI_3D_TX_PHY_TXCLKMEASCTRL 0x07
+#define HDMI_3D_TX_PHY_TXMEASCTRL 0x08
+#define HDMI_3D_TX_PHY_CKSYMTXCTRL 0x09
+#define HDMI_3D_TX_PHY_CMPSEQCTRL 0x0a
+#define HDMI_3D_TX_PHY_CMPPWRCTRL 0x0b
+#define HDMI_3D_TX_PHY_CMPMODECTRL 0x0c
+#define HDMI_3D_TX_PHY_MEASCTRL 0x0d
+#define HDMI_3D_TX_PHY_VLEVCTRL 0x0e
+#define HDMI_3D_TX_PHY_D2ACTRL 0x0f
+#define HDMI_3D_TX_PHY_CURRCTRL 0x10
+#define HDMI_3D_TX_PHY_DRVANACTRL 0x11
+#define HDMI_3D_TX_PHY_PLLMEASCTRL 0x12
+#define HDMI_3D_TX_PHY_PLLPHBYCTRL 0x13
+#define HDMI_3D_TX_PHY_GRP_CTRL 0x14
+#define HDMI_3D_TX_PHY_GMPCTRL 0x15
+#define HDMI_3D_TX_PHY_MPLLMEASCTRL 0x16
+#define HDMI_3D_TX_PHY_MSM_CTRL 0x17
+#define HDMI_3D_TX_PHY_SCRPB_STATUS 0x18
+#define HDMI_3D_TX_PHY_TXTERM 0x19
+#define HDMI_3D_TX_PHY_PTRPT_ENBL 0x1a
+#define HDMI_3D_TX_PHY_PATTERNGEN 0x1b
+#define HDMI_3D_TX_PHY_SDCAP_MODE 0x1c
+#define HDMI_3D_TX_PHY_SCOPEMODE 0x1d
+#define HDMI_3D_TX_PHY_DIGTXMODE 0x1e
+#define HDMI_3D_TX_PHY_STR_STATUS 0x1f
+#define HDMI_3D_TX_PHY_SCOPECNT0 0x20
+#define HDMI_3D_TX_PHY_SCOPECNT1 0x21
+#define HDMI_3D_TX_PHY_SCOPECNT2 0x22
+#define HDMI_3D_TX_PHY_SCOPECNTCLK 0x23
+#define HDMI_3D_TX_PHY_SCOPESAMPLE 0x24
+#define HDMI_3D_TX_PHY_SCOPECNTMSB01 0x25
+#define HDMI_3D_TX_PHY_SCOPECNTMSB2CK 0x26
+
+/* HDMI_3D_TX_PHY_CKCALCTRL values */
+#define HDMI_3D_TX_PHY_CKCALCTRL_OVERRIDE BIT(15)
+
+/* HDMI_3D_TX_PHY_MSM_CTRL values */
+#define HDMI_3D_TX_PHY_MSM_CTRL_MPLL_PH_SEL_CK BIT(13)
+#define HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_CLK_REF_MPLL (0 << 1)
+#define HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_OFF (1 << 1)
+#define HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_PCLK (2 << 1)
+#define HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_FB_CLK (3 << 1)
+#define HDMI_3D_TX_PHY_MSM_CTRL_SCOPE_CK_SEL BIT(0)
+
+/* HDMI_3D_TX_PHY_PTRPT_ENBL values */
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_OVERRIDE BIT(15)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_PG_SKIP_BIT2 BIT(8)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_PG_SKIP_BIT1 BIT(7)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_PG_SKIP_BIT0 BIT(6)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_CK_REF_ENB BIT(5)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_RCAL_ENB BIT(4)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_TX_CLK_ALIGN_ENB BIT(3)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_TX_READY BIT(2)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_CKO_WORD_ENB BIT(1)
+#define HDMI_3D_TX_PHY_PTRPT_ENBL_REFCLK_ENB BIT(0)
+
#endif /* __DW_HDMI_H__ */
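Two details of the detection rework above are worth spelling out. The controller version is now packed as (DESIGN_ID << 8) | REVISION_ID and logged with "v%x.%03x", so a hypothetical DESIGN_ID of 0x20 with REVISION_ID 0x0a comes out as "v2.00a" (values here are illustrative only, not taken from real hardware):

	u16 version = (0x20 << 8) | 0x0a;	/* 0x200a */
	/* "v%x.%03x" prints "v2.00a" */
	pr_info("v%x.%03x\n", version >> 12, version & 0xfff);

In the same spirit, the AHB audio platform device is now keyed off CONFIG3_ID bit 1 (HDMI_CONFIG3_AHBAUDDMA) instead of CONFIG1_ID bit 0, and dw_hdmi_detect_phy() matches CONFIG2_ID against the dw_hdmi_phys[] table to pick the PHY name and the has_svsret quirk (note the matching SPARECTRL to SVSRET rename in PHY_CONF0).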
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index b2c267df7ee7..cdd0a9d44ba1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#include <asm/unaligned.h>
+
#include <drm/bridge/mhl.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -28,7 +30,10 @@
#include "sil-sii8620.h"
-#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+#define SII8620_BURST_BUF_LEN 288
+#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+#define MHL1_MAX_LCLK 225000
+#define MHL3_MAX_LCLK 600000
enum sii8620_mode {
CM_DISCONNECTED,
@@ -59,6 +64,9 @@ struct sii8620 {
struct regulator_bulk_data supplies[2];
struct mutex lock; /* context lock, protects fields below */
int error;
+ int pixel_clock;
+ unsigned int use_packed_pixel:1;
+ int video_code;
enum sii8620_mode mode;
enum sii8620_sink_type sink_type;
u8 cbus_status;
@@ -66,11 +74,20 @@ struct sii8620 {
u8 xstat[MHL_XDS_SIZE];
u8 devcap[MHL_DCAP_SIZE];
u8 xdevcap[MHL_XDC_SIZE];
- u8 avif[19];
+ u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
struct edid *edid;
unsigned int gen2_write_burst:1;
enum sii8620_mt_state mt_state;
struct list_head mt_queue;
+ struct {
+ int r_size;
+ int r_count;
+ int rx_ack;
+ int rx_count;
+ u8 rx_buf[32];
+ int tx_count;
+ u8 tx_buf[32];
+ } burst;
};
struct sii8620_mt_msg;
@@ -78,12 +95,15 @@ struct sii8620_mt_msg;
typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
struct sii8620_mt_msg *msg);
+typedef void (*sii8620_cb)(struct sii8620 *ctx, int ret);
+
struct sii8620_mt_msg {
struct list_head node;
u8 reg[4];
u8 ret;
sii8620_mt_msg_cb send;
sii8620_mt_msg_cb recv;
+ sii8620_cb continuation;
};
static const u8 sii8620_i2c_page[] = {
@@ -101,6 +121,7 @@ static void sii8620_fetch_edid(struct sii8620 *ctx);
static void sii8620_set_upstream_edid(struct sii8620 *ctx);
static void sii8620_enable_hpd(struct sii8620 *ctx);
static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+static void sii8620_disconnect(struct sii8620 *ctx);
static int sii8620_clear_error(struct sii8620 *ctx)
{
@@ -227,6 +248,11 @@ static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
sii8620_write(ctx, addr, val);
}
+static inline bool sii8620_is_mhl3(struct sii8620 *ctx)
+{
+ return ctx->mode >= CM_MHL3;
+}
+
static void sii8620_mt_cleanup(struct sii8620 *ctx)
{
struct sii8620_mt_msg *msg, *n;
@@ -251,9 +277,11 @@ static void sii8620_mt_work(struct sii8620 *ctx)
ctx->mt_state = MT_STATE_READY;
msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
node);
+ list_del(&msg->node);
if (msg->recv)
msg->recv(ctx, msg);
- list_del(&msg->node);
+ if (msg->continuation)
+ msg->continuation(ctx, msg->ret);
kfree(msg);
}
@@ -266,9 +294,59 @@ static void sii8620_mt_work(struct sii8620 *ctx)
msg->send(ctx, msg);
}
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+ u8 ctrl = BIT_MDT_RCV_CTRL_MDT_RCV_EN;
+
+ if (ctx->gen2_write_burst)
+ return;
+
+ if (ctx->mode >= CM_MHL1)
+ ctrl |= BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN;
+
+ sii8620_write_seq(ctx,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_MDT_RCV_CTRL, ctrl
+ );
+ ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (!ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_CTRL, 0,
+ REG_MDT_RCV_CTRL, 0
+ );
+ ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+ | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+ | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+ | BIT_MDT_XMIT_SM_ERROR,
+ REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+ | BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+ | BIT_MDT_RFIFO_DATA_RDY
+ );
+ sii8620_enable_gen2_write_burst(ctx);
+}
+
static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
+ if (msg->reg[0] == MHL_SET_INT &&
+ msg->reg[1] == MHL_INT_REG(RCHANGE) &&
+ msg->reg[2] == MHL_INT_RC_FEAT_REQ)
+ sii8620_enable_gen2_write_burst(ctx);
+ else
+ sii8620_disable_gen2_write_burst(ctx);
+
switch (msg->reg[0]) {
case MHL_WRITE_STAT:
case MHL_SET_INT:
@@ -281,6 +359,12 @@ static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
sii8620_write(ctx, REG_MSC_COMMAND_START,
BIT_MSC_COMMAND_START_MSC_MSG);
break;
+ case MHL_READ_DEVCAP_REG:
+ case MHL_READ_XDEVCAP_REG:
+ sii8620_write(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg[1]);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_READ_DEVCAP);
+ break;
default:
dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
msg->reg[0]);
@@ -299,6 +383,21 @@ static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
return msg;
}
+static void sii8620_mt_set_cont(struct sii8620 *ctx, sii8620_cb cont)
+{
+ struct sii8620_mt_msg *msg;
+
+ if (ctx->error)
+ return;
+
+ if (list_empty(&ctx->mt_queue)) {
+ ctx->error = -EINVAL;
+ return;
+ }
+ msg = list_last_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+ msg->continuation = cont;
+}
+
static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
{
struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
@@ -358,7 +457,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
}
}
-static void sii8620_mr_devcap(struct sii8620 *ctx)
+static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
{
static const char * const sink_str[] = {
[SINK_NONE] = "NONE",
@@ -366,23 +465,10 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
[SINK_DVI] = "DVI"
};
- u8 dcap[MHL_DCAP_SIZE];
char sink_name[20];
struct device *dev = ctx->dev;
- sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
- if (ctx->error < 0)
- return;
-
- dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
- dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
- dcap[MHL_DCAP_MHL_VERSION] / 16,
- dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
- dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
- dcap[MHL_DCAP_DEVICE_ID_L]);
- sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
-
- if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+ if (ret < 0)
return;
sii8620_fetch_edid(ctx);
@@ -401,18 +487,76 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
dev_info(dev, "detected sink(type: %s): %s\n",
sink_str[ctx->sink_type], sink_name);
+}
+
+static void sii8620_hsic_init(struct sii8620 *ctx)
+{
+ if (!sii8620_is_mhl3(ctx))
+ return;
+
+ sii8620_write(ctx, REG_FCGC,
+ BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
+ sii8620_setbits(ctx, REG_HRXCTRL3,
+ BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
+ sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
+ sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
+ sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
+ sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
+ sii8620_write_seq_static(ctx,
+ REG_TDMLLCTL, 0,
+ REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
+ BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
+ REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
+ REG_HRXINTL, 0xff,
+ REG_HRXINTH, 0xff,
+ REG_TTXINTL, 0xff,
+ REG_TTXINTH, 0xff,
+ REG_TRXINTL, 0xff,
+ REG_TRXINTH, 0xff,
+ REG_HTXINTL, 0xff,
+ REG_HTXINTH, 0xff,
+ REG_FCINTR0, 0xff,
+ REG_FCINTR1, 0xff,
+ REG_FCINTR2, 0xff,
+ REG_FCINTR3, 0xff,
+ REG_FCINTR4, 0xff,
+ REG_FCINTR5, 0xff,
+ REG_FCINTR6, 0xff,
+ REG_FCINTR7, 0xff
+ );
+}
+
+static void sii8620_edid_read(struct sii8620 *ctx, int ret)
+{
+ if (ret < 0)
+ return;
+
sii8620_set_upstream_edid(ctx);
+ sii8620_hsic_init(ctx);
sii8620_enable_hpd(ctx);
}
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+ u8 dcap[MHL_DCAP_SIZE];
+ struct device *dev = ctx->dev;
+
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+ if (ctx->error < 0)
+ return;
+
+ dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+ dcap[MHL_DCAP_MHL_VERSION] / 16,
+ dcap[MHL_DCAP_MHL_VERSION] % 16,
+ dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
+ dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
+ sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+}
+
static void sii8620_mr_xdevcap(struct sii8620 *ctx)
{
sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
MHL_XDC_SIZE);
-
- sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
- MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
- sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
}
static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
@@ -450,6 +594,197 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
msg->recv = sii8620_mt_read_devcap_recv;
}
+static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 reg = msg->reg[0] & 0x7f;
+
+ if (msg->reg[0] & 0x80)
+ ctx->xdevcap[reg] = msg->ret;
+ else
+ ctx->devcap[reg] = msg->ret;
+}
+
+static void sii8620_mt_read_devcap_reg(struct sii8620 *ctx, u8 reg)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = (reg & 0x80) ? MHL_READ_XDEVCAP_REG : MHL_READ_DEVCAP_REG;
+ msg->reg[1] = reg;
+ msg->send = sii8620_mt_msc_cmd_send;
+ msg->recv = sii8620_mt_read_devcap_reg_recv;
+}
+
+static inline void sii8620_mt_read_xdevcap_reg(struct sii8620 *ctx, u8 reg)
+{
+ sii8620_mt_read_devcap_reg(ctx, reg | 0x80);
+}
+
+static void *sii8620_burst_get_tx_buf(struct sii8620 *ctx, int len)
+{
+ u8 *buf = &ctx->burst.tx_buf[ctx->burst.tx_count];
+ int size = len + 2;
+
+ if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+ dev_err(ctx->dev, "TX-BLK buffer exhausted\n");
+ ctx->error = -EINVAL;
+ return NULL;
+ }
+
+ ctx->burst.tx_count += size;
+ buf[1] = len;
+
+ return buf + 2;
+}
+
+static u8 *sii8620_burst_get_rx_buf(struct sii8620 *ctx, int len)
+{
+ u8 *buf = &ctx->burst.rx_buf[ctx->burst.rx_count];
+ int size = len + 1;
+
+ if (ctx->burst.rx_count + size > ARRAY_SIZE(ctx->burst.rx_buf)) {
+ dev_err(ctx->dev, "RX-BLK buffer exhausted\n");
+ ctx->error = -EINVAL;
+ return NULL;
+ }
+
+ ctx->burst.rx_count += size;
+ buf[0] = len;
+
+ return buf + 1;
+}
+
+static void sii8620_burst_send(struct sii8620 *ctx)
+{
+ int tx_left = ctx->burst.tx_count;
+ u8 *d = ctx->burst.tx_buf;
+
+ while (tx_left > 0) {
+ int len = d[1] + 2;
+
+ if (ctx->burst.r_count + len > ctx->burst.r_size)
+ break;
+ d[0] = min(ctx->burst.rx_ack, 255);
+ ctx->burst.rx_ack -= d[0];
+ sii8620_write_buf(ctx, REG_EMSC_XMIT_WRITE_PORT, d, len);
+ ctx->burst.r_count += len;
+ tx_left -= len;
+ d += len;
+ }
+
+ ctx->burst.tx_count = tx_left;
+
+ while (ctx->burst.rx_ack > 0) {
+ u8 b[2] = { min(ctx->burst.rx_ack, 255), 0 };
+
+ if (ctx->burst.r_count + 2 > ctx->burst.r_size)
+ break;
+ ctx->burst.rx_ack -= b[0];
+ sii8620_write_buf(ctx, REG_EMSC_XMIT_WRITE_PORT, b, 2);
+ ctx->burst.r_count += 2;
+ }
+}
+
+static void sii8620_burst_receive(struct sii8620 *ctx)
+{
+ u8 buf[3], *d;
+ int count;
+
+ sii8620_read_buf(ctx, REG_EMSCRFIFOBCNTL, buf, 2);
+ count = get_unaligned_le16(buf);
+ while (count > 0) {
+ int len = min(count, 3);
+
+ sii8620_read_buf(ctx, REG_EMSC_RCV_READ_PORT, buf, len);
+ count -= len;
+ ctx->burst.rx_ack += len - 1;
+ ctx->burst.r_count -= buf[1];
+ if (ctx->burst.r_count < 0)
+ ctx->burst.r_count = 0;
+
+ if (len < 3 || !buf[2])
+ continue;
+
+ len = buf[2];
+ d = sii8620_burst_get_rx_buf(ctx, len);
+ if (!d)
+ continue;
+ sii8620_read_buf(ctx, REG_EMSC_RCV_READ_PORT, d, len);
+ count -= len;
+ ctx->burst.rx_ack += len;
+ }
+}
+
+static void sii8620_burst_tx_rbuf_info(struct sii8620 *ctx, int size)
+{
+ struct mhl_burst_blk_rcv_buffer_info *d =
+ sii8620_burst_get_tx_buf(ctx, sizeof(*d));
+ if (!d)
+ return;
+
+ d->id = cpu_to_be16(MHL_BURST_ID_BLK_RCV_BUFFER_INFO);
+ d->size = cpu_to_le16(size);
+}
+
+static u8 sii8620_checksum(void *ptr, int size)
+{
+ u8 *d = ptr, sum = 0;
+
+ while (size--)
+ sum += *d++;
+
+ return sum;
+}
+
+static void sii8620_mhl_burst_hdr_set(struct mhl3_burst_header *h,
+ enum mhl_burst_id id)
+{
+ h->id = cpu_to_be16(id);
+ h->total_entries = 1;
+ h->sequence_index = 1;
+}
+
+static void sii8620_burst_tx_bits_per_pixel_fmt(struct sii8620 *ctx, u8 fmt)
+{
+ struct mhl_burst_bits_per_pixel_fmt *d;
+ const int size = sizeof(*d) + sizeof(d->desc[0]);
+
+ d = sii8620_burst_get_tx_buf(ctx, size);
+ if (!d)
+ return;
+
+ sii8620_mhl_burst_hdr_set(&d->hdr, MHL_BURST_ID_BITS_PER_PIXEL_FMT);
+ d->num_entries = 1;
+ d->desc[0].stream_id = 0;
+ d->desc[0].pixel_format = fmt;
+ d->hdr.checksum -= sii8620_checksum(d, size);
+}
+
+static void sii8620_burst_rx_all(struct sii8620 *ctx)
+{
+ u8 *d = ctx->burst.rx_buf;
+ int count = ctx->burst.rx_count;
+
+ while (count-- > 0) {
+ int len = *d++;
+ int id = get_unaligned_be16(&d[0]);
+
+ switch (id) {
+ case MHL_BURST_ID_BLK_RCV_BUFFER_INFO:
+ ctx->burst.r_size = get_unaligned_le16(&d[2]);
+ break;
+ default:
+ break;
+ }
+ count -= len;
+ d += len;
+ }
+ ctx->burst.rx_count = 0;
+}
+
static void sii8620_fetch_edid(struct sii8620 *ctx)
{
u8 lm_ddc, ddc_cmd, int3, cbus;
@@ -537,12 +872,12 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
edid = new_edid;
}
}
-
- if (fetched + FETCH_SIZE == edid_len)
- sii8620_write(ctx, REG_INTR3, int3);
}
- sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+ sii8620_write_seq(ctx,
+ REG_INTR3_MASK, BIT_DDC_CMD_DONE,
+ REG_LM_DDC, lm_ddc
+ );
end:
kfree(ctx->edid);
@@ -641,11 +976,10 @@ static void sii8620_hw_reset(struct sii8620 *ctx)
static void sii8620_cbus_reset(struct sii8620 *ctx)
{
- sii8620_write_seq_static(ctx,
- REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
- | BIT_PWD_SRST_CBUS_RST_SW_EN,
- REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
- );
+ sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+ | BIT_PWD_SRST_CBUS_RST_SW_EN);
+ usleep_range(10000, 20000);
+ sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN);
}
static void sii8620_set_auto_zone(struct sii8620 *ctx)
@@ -683,48 +1017,208 @@ static void sii8620_stop_video(struct sii8620 *ctx)
| BIT_TPI_SC_TPI_AV_MUTE;
break;
case SINK_HDMI:
+ default:
val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
| BIT_TPI_SC_TPI_AV_MUTE
| BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
break;
- default:
- return;
}
sii8620_write(ctx, REG_TPI_SC, val);
}
+static void sii8620_set_format(struct sii8620 *ctx)
+{
+ u8 out_fmt;
+
+ if (sii8620_is_mhl3(ctx)) {
+ sii8620_setbits(ctx, REG_M3_P0CTRL,
+ BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
+ ctx->use_packed_pixel ? ~0 : 0);
+ } else {
+ if (ctx->use_packed_pixel)
+ sii8620_write_seq_static(ctx,
+ REG_VID_MODE, BIT_VID_MODE_M1080P,
+ REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
+ REG_MHLTX_CTL6, 0x60
+ );
+ else
+ sii8620_write_seq_static(ctx,
+ REG_VID_MODE, 0,
+ REG_MHL_TOP_CTL, 1,
+ REG_MHLTX_CTL6, 0xa0
+ );
+ }
+
+ if (ctx->use_packed_pixel)
+ out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
+ BIT_TPI_OUTPUT_CSCMODE709;
+ else
+ out_fmt = VAL_TPI_FORMAT(RGB, FULL);
+
+ sii8620_write_seq(ctx,
+ REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, out_fmt,
+ );
+}
+
+static int mhl3_infoframe_init(struct mhl3_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->version = 3;
+ frame->hev_format = -1;
+ return 0;
+}
+
+static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
+ void *buffer, size_t size)
+{
+ const int frm_len = HDMI_INFOFRAME_HEADER_SIZE + MHL3_INFOFRAME_SIZE;
+ u8 *ptr = buffer;
+
+ if (size < frm_len)
+ return -ENOSPC;
+
+ memset(buffer, 0, size);
+ ptr[0] = HDMI_INFOFRAME_TYPE_VENDOR;
+ ptr[1] = frame->version;
+ ptr[2] = MHL3_INFOFRAME_SIZE;
+ ptr[4] = MHL3_IEEE_OUI & 0xff;
+ ptr[5] = (MHL3_IEEE_OUI >> 8) & 0xff;
+ ptr[6] = (MHL3_IEEE_OUI >> 16) & 0xff;
+ ptr[7] = frame->video_format & 0x3;
+ ptr[7] |= (frame->format_type & 0x7) << 2;
+ ptr[7] |= frame->sep_audio ? BIT(5) : 0;
+ if (frame->hev_format >= 0) {
+ ptr[9] = 1;
+ ptr[10] = (frame->hev_format >> 8) & 0xff;
+ ptr[11] = frame->hev_format & 0xff;
+ }
+ if (frame->av_delay) {
+ bool sign = frame->av_delay < 0;
+ int delay = sign ? -frame->av_delay : frame->av_delay;
+
+ ptr[12] = (delay >> 16) & 0xf;
+ if (sign)
+ ptr[12] |= BIT(4);
+ ptr[13] = (delay >> 8) & 0xff;
+ ptr[14] = delay & 0xff;
+ }
+ ptr[3] -= sii8620_checksum(buffer, frm_len);
+ return frm_len;
+}
+
+static void sii8620_set_infoframes(struct sii8620 *ctx)
+{
+ struct mhl3_infoframe mhl_frm;
+ union hdmi_infoframe frm;
+ u8 buf[31];
+ int ret;
+
+ if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
+ sii8620_write(ctx, REG_TPI_SC,
+ BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
+ ARRAY_SIZE(ctx->avif) - 3);
+ sii8620_write(ctx, REG_PKT_FILTER_0,
+ BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
+ BIT_PKT_FILTER_0_DROP_MPEG_PKT |
+ BIT_PKT_FILTER_0_DROP_GCP_PKT,
+ BIT_PKT_FILTER_1_DROP_GEN_PKT);
+ return;
+ }
+
+ ret = hdmi_avi_infoframe_init(&frm.avi);
+ frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+ frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+ frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
+ frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
+ frm.avi.video_code = ctx->video_code;
+ if (!ret)
+ ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+ if (ret > 0)
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+ sii8620_write(ctx, REG_PKT_FILTER_0,
+ BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
+ BIT_PKT_FILTER_0_DROP_MPEG_PKT |
+ BIT_PKT_FILTER_0_DROP_AVI_PKT |
+ BIT_PKT_FILTER_0_DROP_GCP_PKT,
+ BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS |
+ BIT_PKT_FILTER_1_DROP_GEN_PKT |
+ BIT_PKT_FILTER_1_DROP_VSIF_PKT);
+
+ sii8620_write(ctx, REG_TPI_INFO_FSEL, BIT_TPI_INFO_FSEL_EN
+ | BIT_TPI_INFO_FSEL_RPT | VAL_TPI_INFO_FSEL_VSI);
+ ret = mhl3_infoframe_init(&mhl_frm);
+ if (!ret)
+ ret = mhl3_infoframe_pack(&mhl_frm, buf, ARRAY_SIZE(buf));
+ sii8620_write_buf(ctx, REG_TPI_INFO_B0, buf, ret);
+}
+
static void sii8620_start_hdmi(struct sii8620 *ctx)
{
sii8620_write_seq_static(ctx,
REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
| BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
- | BIT_VID_OVRRD_M1080P_OVRRD,
- REG_VID_MODE, 0,
- REG_MHL_TOP_CTL, 0x1,
- REG_MHLTX_CTL6, 0xa0,
- REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
- REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
- );
-
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL |
- MHL_DST_LM_PATH_ENABLED);
+ | BIT_VID_OVRRD_M1080P_OVRRD);
+ sii8620_set_format(ctx);
- sii8620_set_auto_zone(ctx);
+ if (!sii8620_is_mhl3(ctx)) {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+ sii8620_set_auto_zone(ctx);
+ } else {
+ static const struct {
+ int max_clk;
+ u8 zone;
+ u8 link_rate;
+ u8 rrp_decode;
+ } clk_spec[] = {
+ { 150000, VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS,
+ MHL_XDS_LINK_RATE_1_5_GBPS, 0x38 },
+ { 300000, VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS,
+ MHL_XDS_LINK_RATE_3_0_GBPS, 0x40 },
+ { 600000, VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS,
+ MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
+ };
+ u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
+ int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+ if (clk < clk_spec[i].max_clk)
+ break;
- sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+ if (100 * clk >= 98 * clk_spec[i].max_clk)
+ p0_ctrl |= BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN;
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
- ARRAY_SIZE(ctx->avif));
+ sii8620_burst_tx_bits_per_pixel_fmt(ctx, ctx->use_packed_pixel);
+ sii8620_burst_send(ctx);
+ sii8620_write_seq(ctx,
+ REG_MHL_DP_CTL0, 0xf0,
+ REG_MHL3_TX_ZONE_CTL, clk_spec[i].zone);
+ sii8620_setbits(ctx, REG_M3_P0CTRL,
+ BIT_M3_P0CTRL_MHL3_P0_PORT_EN
+ | BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN, p0_ctrl);
+ sii8620_setbits(ctx, REG_M3_POSTM, MSK_M3_POSTM_RRP_DECODE,
+ clk_spec[i].rrp_decode);
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE
+ | BIT_M3_CTRL_H2M_SWRST,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE
+ );
+ sii8620_mt_write_stat(ctx, MHL_XDS_REG(AVLINK_MODE_CONTROL),
+ clk_spec[i].link_rate);
+ }
- sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+ sii8620_set_infoframes(ctx);
}
static void sii8620_start_video(struct sii8620 *ctx)
{
- if (ctx->mode < CM_MHL3)
+ if (!sii8620_is_mhl3(ctx))
sii8620_stop_video(ctx);
switch (ctx->sink_type) {
@@ -757,44 +1251,6 @@ static void sii8620_enable_hpd(struct sii8620 *ctx)
);
}
-static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
-{
- if (ctx->gen2_write_burst)
- return;
-
- sii8620_write_seq_static(ctx,
- REG_MDT_RCV_TIMEOUT, 100,
- REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
- );
- ctx->gen2_write_burst = 1;
-}
-
-static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
-{
- if (!ctx->gen2_write_burst)
- return;
-
- sii8620_write_seq_static(ctx,
- REG_MDT_XMIT_CTRL, 0,
- REG_MDT_RCV_CTRL, 0
- );
- ctx->gen2_write_burst = 0;
-}
-
-static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
-{
- sii8620_write_seq_static(ctx,
- REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
- | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
- | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
- | BIT_MDT_XMIT_SM_ERROR,
- REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
- | BIT_MDT_IDLE_AFTER_HAWB_DISABLE
- | BIT_MDT_RFIFO_DATA_RDY
- );
- sii8620_enable_gen2_write_burst(ctx);
-}
-
static void sii8620_mhl_discover(struct sii8620 *ctx)
{
sii8620_write_seq_static(ctx,
@@ -838,7 +1294,7 @@ static void sii8620_mhl_discover(struct sii8620 *ctx)
static void sii8620_peer_specific_init(struct sii8620 *ctx)
{
- if (ctx->mode == CM_MHL3)
+ if (sii8620_is_mhl3(ctx))
sii8620_write_seq_static(ctx,
REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
REG_EMSCINTRMASK1,
@@ -948,21 +1404,51 @@ static void sii8620_mhl_init(struct sii8620 *ctx)
);
sii8620_disable_gen2_write_burst(ctx);
- /* currently MHL3 is not supported, so we force version to 0 */
- sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), SII8620_MHL_VERSION);
sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
| MHL_DST_CONN_POW_STAT);
sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
}
+static void sii8620_emsc_enable(struct sii8620 *ctx)
+{
+ u8 reg;
+
+ sii8620_setbits(ctx, REG_GENCTL, BIT_GENCTL_EMSC_EN
+ | BIT_GENCTL_CLR_EMSC_RFIFO
+ | BIT_GENCTL_CLR_EMSC_XFIFO, ~0);
+ sii8620_setbits(ctx, REG_GENCTL, BIT_GENCTL_CLR_EMSC_RFIFO
+ | BIT_GENCTL_CLR_EMSC_XFIFO, 0);
+ sii8620_setbits(ctx, REG_COMMECNT, BIT_COMMECNT_I2C_TO_EMSC_EN, ~0);
+ reg = sii8620_readb(ctx, REG_EMSCINTR);
+ sii8620_write(ctx, REG_EMSCINTR, reg);
+ sii8620_write(ctx, REG_EMSCINTRMASK, BIT_EMSCINTR_SPI_DVLD);
+}
+
+static int sii8620_wait_for_fsm_state(struct sii8620 *ctx, u8 state)
+{
+ int i;
+
+ for (i = 0; i < 10; ++i) {
+ u8 s = sii8620_readb(ctx, REG_COC_STAT_0);
+
+ if ((s & MSK_COC_STAT_0_FSM_STATE) == state)
+ return 0;
+ if (!(s & BIT_COC_STAT_0_PLL_LOCKED))
+ return -EBUSY;
+ usleep_range(4000, 6000);
+ }
+ return -ETIMEDOUT;
+}
+
static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
{
+ int ret;
+
if (ctx->mode == mode)
return;
- ctx->mode = mode;
-
switch (mode) {
case CM_MHL1:
sii8620_write_seq_static(ctx,
@@ -972,15 +1458,46 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
| BIT_DPD_OSC_EN,
REG_COC_INTR_MASK, 0
);
+ ctx->mode = mode;
break;
case CM_MHL3:
+ sii8620_write(ctx, REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE);
+ ctx->mode = mode;
+ return;
+ case CM_ECBUS_S:
+ sii8620_emsc_enable(ctx);
sii8620_write_seq_static(ctx,
- REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
- REG_COC_CTL0, 0x40,
- REG_MHL_COC_CTL1, 0x07
+ REG_TTXSPINUMS, 4,
+ REG_TRXSPINUMS, 4,
+ REG_TTXHSICNUMS, 0x14,
+ REG_TRXHSICNUMS, 0x14,
+ REG_TTXTOTNUMS, 0x18,
+ REG_TRXTOTNUMS, 0x18,
+ REG_PWD_SRST, BIT_PWD_SRST_COC_DOC_RST
+ | BIT_PWD_SRST_CBUS_RST_SW_EN,
+ REG_MHL_COC_CTL1, 0xbd,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN,
+ REG_COC_CTLB, 0x01,
+ REG_COC_CTL0, 0x5c,
+ REG_COC_CTL14, 0x03,
+ REG_COC_CTL15, 0x80,
+ REG_MHL_DP_CTL6, BIT_MHL_DP_CTL6_DP_TAP1_SGN
+ | BIT_MHL_DP_CTL6_DP_TAP1_EN
+ | BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN,
+ REG_MHL_DP_CTL8, 0x03
);
- break;
+ ret = sii8620_wait_for_fsm_state(ctx, 0x03);
+ sii8620_write_seq_static(ctx,
+ REG_COC_CTL14, 0x00,
+ REG_COC_CTL15, 0x80
+ );
+ if (!ret)
+ sii8620_write(ctx, REG_CBUS3_CNVT, 0x85);
+ else
+ sii8620_disconnect(ctx);
+ return;
case CM_DISCONNECTED:
+ ctx->mode = mode;
break;
default:
dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
@@ -1007,10 +1524,12 @@ static void sii8620_disconnect(struct sii8620 *ctx)
{
sii8620_disable_gen2_write_burst(ctx);
sii8620_stop_video(ctx);
- msleep(50);
+ msleep(100);
sii8620_cbus_reset(ctx);
sii8620_set_mode(ctx, CM_DISCONNECTED);
sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, 0,
+ REG_MHL_PLL_CTL0, 0x07,
REG_COC_CTL0, 0x40,
REG_CBUS3_CNVT, 0x84,
REG_COC_CTL14, 0x00,
@@ -1123,24 +1642,45 @@ static void sii8620_irq_disc(struct sii8620 *ctx)
sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
}
+static void sii8620_read_burst(struct sii8620 *ctx)
+{
+ u8 buf[17];
+
+ sii8620_read_buf(ctx, REG_MDT_RCV_READ_PORT, buf, ARRAY_SIZE(buf));
+ sii8620_write(ctx, REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN |
+ BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN |
+ BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR);
+ sii8620_readb(ctx, REG_MDT_RFIFO_STAT);
+}
+
static void sii8620_irq_g2wb(struct sii8620 *ctx)
{
u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
- dev_dbg(ctx->dev, "HAWB idle\n");
+ if (sii8620_is_mhl3(ctx))
+ sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
+ MHL_INT_RC_FEAT_COMPLETE);
+
+ if (stat & BIT_MDT_RFIFO_DATA_RDY)
+ sii8620_read_burst(ctx);
+
+ if (stat & BIT_MDT_XFIFO_EMPTY)
+ sii8620_write(ctx, REG_MDT_XMIT_CTRL, 0);
sii8620_write(ctx, REG_MDT_INT_0, stat);
}
-static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+static void sii8620_status_dcap_ready(struct sii8620 *ctx)
{
- if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
- sii8620_set_mode(ctx, CM_MHL1);
- sii8620_peer_specific_init(ctx);
- sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
- | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
- }
+ enum sii8620_mode mode;
+
+ mode = ctx->stat[MHL_DST_VERSION] >= 0x30 ? CM_MHL3 : CM_MHL1;
+ if (mode > ctx->mode)
+ sii8620_set_mode(ctx, mode);
+ sii8620_peer_specific_init(ctx);
+ sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+ | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
}
static void sii8620_status_changed_path(struct sii8620 *ctx)
@@ -1149,7 +1689,9 @@ static void sii8620_status_changed_path(struct sii8620 *ctx)
sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
MHL_DST_LM_CLK_MODE_NORMAL
| MHL_DST_LM_PATH_ENABLED);
- sii8620_mt_read_devcap(ctx, false);
+ if (!sii8620_is_mhl3(ctx))
+ sii8620_mt_read_devcap(ctx, false);
+ sii8620_mt_set_cont(ctx, sii8620_sink_detected);
} else {
sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
MHL_DST_LM_CLK_MODE_NORMAL);
@@ -1166,19 +1708,75 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
- if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
- sii8620_status_changed_dcap(ctx);
+ if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+ sii8620_status_dcap_ready(ctx);
if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
sii8620_status_changed_path(ctx);
}
+static void sii8620_ecbus_up(struct sii8620 *ctx, int ret)
+{
+ if (ret < 0)
+ return;
+
+ sii8620_set_mode(ctx, CM_ECBUS_S);
+}
+
+static void sii8620_got_ecbus_speed(struct sii8620 *ctx, int ret)
+{
+ if (ret < 0)
+ return;
+
+ sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+ MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+ sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+ sii8620_mt_set_cont(ctx, sii8620_ecbus_up);
+}
+
+static void sii8620_mhl_burst_emsc_support_set(struct mhl_burst_emsc_support *d,
+ enum mhl_burst_id id)
+{
+ sii8620_mhl_burst_hdr_set(&d->hdr, MHL_BURST_ID_EMSC_SUPPORT);
+ d->num_entries = 1;
+ d->burst_id[0] = cpu_to_be16(id);
+}
+
+static void sii8620_send_features(struct sii8620 *ctx)
+{
+ u8 buf[16];
+
+ sii8620_write(ctx, REG_MDT_XMIT_CTRL, BIT_MDT_XMIT_CTRL_EN
+ | BIT_MDT_XMIT_CTRL_FIXED_BURST_LEN);
+ sii8620_mhl_burst_emsc_support_set((void *)buf,
+ MHL_BURST_ID_HID_PAYLOAD);
+ sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf));
+}
+
static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
{
u8 ints[MHL_INT_SIZE];
sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+
+ if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_DCAP_CHG) {
+ switch (ctx->mode) {
+ case CM_MHL3:
+ sii8620_mt_read_xdevcap_reg(ctx, MHL_XDC_ECBUS_SPEEDS);
+ sii8620_mt_set_cont(ctx, sii8620_got_ecbus_speed);
+ break;
+ case CM_ECBUS_S:
+ sii8620_mt_read_devcap(ctx, true);
+ break;
+ default:
+ break;
+ }
+ }
+ if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
+ sii8620_send_features(ctx);
+ if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
+ sii8620_edid_read(ctx, 0);
}
static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1261,6 +1859,19 @@ static void sii8620_irq_coc(struct sii8620 *ctx)
{
u8 stat = sii8620_readb(ctx, REG_COC_INTR);
+ if (stat & BIT_COC_CALIBRATION_DONE) {
+ u8 cstat = sii8620_readb(ctx, REG_COC_STAT_0);
+
+ cstat &= BIT_COC_STAT_0_PLL_LOCKED | MSK_COC_STAT_0_FSM_STATE;
+ if (cstat == (BIT_COC_STAT_0_PLL_LOCKED | 0x02)) {
+ sii8620_write_seq_static(ctx,
+ REG_COC_CTLB, 0,
+ REG_TRXINTMH, BIT_TDM_INTR_SYNC_DATA
+ | BIT_TDM_INTR_SYNC_WAIT
+ );
+ }
+ }
+
sii8620_write(ctx, REG_COC_INTR, stat);
}
@@ -1289,17 +1900,6 @@ static void sii8620_scdt_high(struct sii8620 *ctx)
);
}
-static void sii8620_scdt_low(struct sii8620 *ctx)
-{
- sii8620_write(ctx, REG_TMDS_CSTAT_P3,
- BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
- BIT_TMDS_CSTAT_P3_CLR_AVI);
-
- sii8620_stop_video(ctx);
-
- sii8620_write(ctx, REG_INTR8_MASK, 0);
-}
-
static void sii8620_irq_scdt(struct sii8620 *ctx)
{
u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1309,8 +1909,6 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
sii8620_scdt_high(ctx);
- else
- sii8620_scdt_low(ctx);
}
sii8620_write(ctx, REG_INTR5, stat);
@@ -1351,6 +1949,65 @@ static void sii8620_irq_infr(struct sii8620 *ctx)
sii8620_start_video(ctx);
}
+static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
+{
+ if (ret < 0)
+ return;
+
+ sii8620_mt_read_devcap(ctx, false);
+}
+
+static void sii8620_irq_tdm(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_TRXINTH);
+ u8 tdm = sii8620_readb(ctx, REG_TRXSTA2);
+
+ if ((tdm & MSK_TDM_SYNCHRONIZED) == VAL_TDM_SYNCHRONIZED) {
+ ctx->mode = CM_ECBUS_S;
+ ctx->burst.rx_ack = 0;
+ ctx->burst.r_size = SII8620_BURST_BUF_LEN;
+ sii8620_burst_tx_rbuf_info(ctx, SII8620_BURST_BUF_LEN);
+ sii8620_mt_read_devcap(ctx, true);
+ sii8620_mt_set_cont(ctx, sii8620_got_xdevcap);
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_MHL_PLL_CTL2, 0,
+ REG_MHL_PLL_CTL2, BIT_MHL_PLL_CTL2_CLKDETECT_EN
+ );
+ }
+
+ sii8620_write(ctx, REG_TRXINTH, stat);
+}
+
+static void sii8620_irq_block(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_EMSCINTR);
+
+ if (stat & BIT_EMSCINTR_SPI_DVLD) {
+ u8 bstat = sii8620_readb(ctx, REG_SPIBURSTSTAT);
+
+ if (bstat & BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE)
+ sii8620_burst_receive(ctx);
+ }
+
+ sii8620_write(ctx, REG_EMSCINTR, stat);
+}
+
+static void sii8620_irq_ddc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR3);
+
+ if (stat & BIT_DDC_CMD_DONE) {
+ sii8620_write(ctx, REG_INTR3_MASK, 0);
+ if (sii8620_is_mhl3(ctx))
+ sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
+ MHL_INT_RC_FEAT_REQ);
+ else
+ sii8620_edid_read(ctx, 0);
+ }
+ sii8620_write(ctx, REG_INTR3, stat);
+}
+
/* endian agnostic, non-volatile version of test_bit */
static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
{
@@ -1366,9 +2023,12 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
{ BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
{ BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
{ BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+ { BIT_FAST_INTR_STAT_TDM, sii8620_irq_tdm },
{ BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
{ BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+ { BIT_FAST_INTR_STAT_BLOCK, sii8620_irq_block },
{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+ { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
};
@@ -1383,7 +2043,9 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
if (sii8620_test_bit(irq_vec[i].bit, stats))
irq_vec[i].handler(ctx);
+ sii8620_burst_rx_all(ctx);
sii8620_mt_work(ctx);
+ sii8620_burst_send(ctx);
ret = sii8620_clear_error(ctx);
if (ret) {
@@ -1450,22 +2112,41 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
struct drm_display_mode *adjusted_mode)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
- bool ret = false;
- int max_clock = 74250;
+ int max_lclk;
+ bool ret = true;
mutex_lock(&ctx->lock);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- goto out;
-
- if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
- max_clock = 300000;
-
- ret = mode->clock <= max_clock;
-
-out:
+ max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
+ if (max_lclk > 3 * adjusted_mode->clock) {
+ ctx->use_packed_pixel = 0;
+ goto end;
+ }
+ if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
+ max_lclk > 2 * adjusted_mode->clock) {
+ ctx->use_packed_pixel = 1;
+ goto end;
+ }
+ ret = false;
+end:
+ if (ret) {
+ u8 vic = drm_match_cea_mode(adjusted_mode);
+
+ if (!vic) {
+ union hdmi_infoframe frm;
+ u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
+
+ drm_hdmi_vendor_infoframe_from_display_mode(
+ &frm.vendor.hdmi, adjusted_mode);
+ vic = frm.vendor.hdmi.vic;
+ if (vic >= ARRAY_SIZE(mhl_vic))
+ vic = 0;
+ vic = mhl_vic[vic];
+ }
+ ctx->video_code = vic;
+ ctx->pixel_clock = adjusted_mode->clock;
+ }
mutex_unlock(&ctx->lock);
-
return ret;
}
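The reworked mode_fixup above sizes the MHL link clock as pixel clock x 3 for unpacked 24bpp RGB and x 2 for packed pixel, compared against MHL1_MAX_LCLK (225000 kHz) or MHL3_MAX_LCLK (600000 kHz). Worked through: 720p60 (74250 kHz) gives 3 x 74250 = 222750 < 225000, so it fits an MHL1 link unpacked; 1080p60 (148500 kHz) gives 3 x 148500 = 445500, too fast for MHL1 even packed (2 x 148500 = 297000 > 225000), but fine unpacked on an MHL3 link (445500 < 600000). When a mode is accepted, the CEA VIC (or the HDMI VIC mapped through mhl_vic[]) and the pixel clock are cached for sii8620_start_hdmi() and the infoframe setup.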
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h
index 6ff616a4f6ce..51ab540cf092 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.h
+++ b/drivers/gpu/drm/bridge/sil-sii8620.h
@@ -353,7 +353,7 @@
#define REG_TTXNUMB 0x0116
#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0
#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3)
-#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07
+#define MSK_TTXNUMB_TTX_NUMBPS 0x07
/* TDM TX NUMSPISYM, default value: 0x04 */
#define REG_TTXSPINUMS 0x0117
@@ -403,12 +403,16 @@
/* TDM RX Status 2nd, default value: 0x00 */
#define REG_TRXSTA2 0x015c
+#define MSK_TDM_SYNCHRONIZED 0xc0
+#define VAL_TDM_SYNCHRONIZED 0x80
/* TDM RX INT Low, default value: 0x00 */
#define REG_TRXINTL 0x0163
/* TDM RX INT High, default value: 0x00 */
#define REG_TRXINTH 0x0164
+#define BIT_TDM_INTR_SYNC_DATA BIT(0)
+#define BIT_TDM_INTR_SYNC_WAIT BIT(1)
/* TDM RX INTMASK High, default value: 0x00 */
#define REG_TRXINTMH 0x0166
@@ -429,12 +433,14 @@
/* HSIC Keeper, default value: 0x00 */
#define REG_KEEPER 0x0181
-#define MSK_KEEPER_KEEPER_MODE_1_0 0x03
+#define MSK_KEEPER_MODE 0x03
+#define VAL_KEEPER_MODE_HOST 0
+#define VAL_KEEPER_MODE_DEVICE 2
/* HSIC Flow Control General, default value: 0x02 */
#define REG_FCGC 0x0183
-#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1)
-#define BIT_FCGC_HSIC_FC_ENABLE BIT(0)
+#define BIT_FCGC_HSIC_HOSTMODE BIT(1)
+#define BIT_FCGC_HSIC_ENABLE BIT(0)
/* HSIC Flow Control CTR13, default value: 0xfc */
#define REG_FCCTR13 0x0191
@@ -841,6 +847,8 @@
#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0
#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f
+#define REG_MHL_DP_CTL8 0x0352
+
/* Tx Zone Ctl1, default value: 0x00 */
#define REG_TX_ZONE_CTL1 0x0361
#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08
@@ -1078,16 +1086,26 @@
/* TPI Info Frame Select, default value: 0x00 */
#define REG_TPI_INFO_FSEL 0x06bf
-#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7)
-#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6)
-#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5)
-#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07
+#define BIT_TPI_INFO_FSEL_EN BIT(7)
+#define BIT_TPI_INFO_FSEL_RPT BIT(6)
+#define BIT_TPI_INFO_FSEL_READ_FLAG BIT(5)
+#define MSK_TPI_INFO_FSEL_PKT 0x07
+#define VAL_TPI_INFO_FSEL_AVI 0x00
+#define VAL_TPI_INFO_FSEL_SPD 0x01
+#define VAL_TPI_INFO_FSEL_AUD 0x02
+#define VAL_TPI_INFO_FSEL_MPG 0x03
+#define VAL_TPI_INFO_FSEL_GEN 0x04
+#define VAL_TPI_INFO_FSEL_GEN2 0x05
+#define VAL_TPI_INFO_FSEL_VSI 0x06
/* TPI Info Byte #0, default value: 0x00 */
#define REG_TPI_INFO_B0 0x06c0
/* CoC Status, default value: 0x00 */
#define REG_COC_STAT_0 0x0700
+#define BIT_COC_STAT_0_PLL_LOCKED BIT(7)
+#define MSK_COC_STAT_0_FSM_STATE 0x0f
+
#define REG_COC_STAT_1 0x0701
#define REG_COC_STAT_2 0x0702
#define REG_COC_STAT_3 0x0703
@@ -1282,14 +1300,14 @@
/* MDT Transmit Control, default value: 0x70 */
#define REG_MDT_XMIT_CTRL 0x0588
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4)
-#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
-#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2)
-#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1)
-#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0)
+#define BIT_MDT_XMIT_CTRL_EN BIT(7)
+#define BIT_MDT_XMIT_CTRL_CMD_MERGE_EN BIT(6)
+#define BIT_MDT_XMIT_CTRL_FIXED_BURST_LEN BIT(5)
+#define BIT_MDT_XMIT_CTRL_FIXED_AID BIT(4)
+#define BIT_MDT_XMIT_CTRL_SINGLE_RUN_EN BIT(3)
+#define BIT_MDT_XMIT_CTRL_CLR_ABORT_WAIT BIT(2)
+#define BIT_MDT_XMIT_CTRL_XFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_XMIT_CTRL_XFIFO_CLR_CUR BIT(0)
/* MDT Receive WRITE Port, default value: 0x00 */
#define REG_MDT_XMIT_WRITE_PORT 0x0589
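One non-obvious idiom in the sil-sii8620.c hunks above: sii8620_checksum() returns the byte-wise sum mod 256, and the packers subtract it from the checksum slot (ptr[3] -= ... in mhl3_infoframe_pack(), d->hdr.checksum -= ... for the burst header), which makes the finished buffer sum to zero, the usual HDMI/MHL infoframe convention. A standalone illustration with a hypothetical four-byte buffer:

	u8 buf[4] = { 0x81, 0x03, 0x0c, 0x00 };	/* buf[3] is the checksum slot */

	buf[3] -= sii8620_checksum(buf, sizeof(buf));
	/* (0x81 + 0x03 + 0x0c) % 256 == 0x90, so buf[3] becomes 0x70
	 * and the whole buffer now sums to 0x100 % 256 == 0.
	 */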
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 7f4cc6e172ab..fc78c90ee931 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -1,6 +1,6 @@
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
help
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 2188d6b61b3e..8690352d96f7 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -13,6 +13,7 @@
#include <video/vga.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/ttm/ttm_bo_api.h>
@@ -230,7 +231,7 @@ irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
/* cirrus_kms.c */
int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
-int cirrus_driver_unload(struct drm_device *dev);
+void cirrus_driver_unload(struct drm_device *dev);
extern struct drm_ioctl_desc cirrus_ioctls[];
extern int cirrus_max_ioctl;
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 3a6309d7d8e4..4cc679278182 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -22,7 +22,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
int src_offset, dst_offset;
- int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+ int bpp = afbdev->gfb.base.format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
@@ -218,7 +218,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT;
info->fbops = &cirrusfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
sizes->fb_height);
@@ -238,7 +238,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
- DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
return 0;
@@ -289,7 +289,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
&cirrus_fb_helper_funcs);
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
- cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+ CIRRUSFB_CONN_LIMIT);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 2c3c0d4072ce..e7fc95f63dca 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -34,7 +34,7 @@ int cirrus_framebuffer_init(struct drm_device *dev,
{
int ret;
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
gfb->obj = obj;
ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
if (ret) {
@@ -208,18 +208,17 @@ out:
return r;
}
-int cirrus_driver_unload(struct drm_device *dev)
+void cirrus_driver_unload(struct drm_device *dev)
{
struct cirrus_device *cdev = dev->dev_private;
if (cdev == NULL)
- return 0;
+ return;
cirrus_modeset_fini(cdev);
cirrus_mm_fini(cdev);
cirrus_device_fini(cdev);
kfree(cdev);
dev->dev_private = NULL;
- return 0;
}
int cirrus_gem_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 9a4a27c1afd2..ed43ab10ac99 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -185,6 +185,7 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct cirrus_device *cdev = dev->dev_private;
+ const struct drm_framebuffer *fb = crtc->primary->fb;
int hsyncstart, hsyncend, htotal, hdispend;
int vtotal, vdispend;
int tmp;
@@ -257,7 +258,7 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
sr07 = RREG8(SEQ_DATA);
sr07 &= 0xe0;
hdr = 0;
- switch (crtc->primary->fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
sr07 |= 0x11;
break;
@@ -280,13 +281,13 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
WREG_SEQ(0x7, sr07);
/* Program the pitch */
- tmp = crtc->primary->fb->pitches[0] / 8;
+ tmp = fb->pitches[0] / 8;
WREG_CRT(VGA_CRTC_OFFSET, tmp);
/* Enable extended blanking and pitch bits, and enable full memory */
tmp = 0x22;
- tmp |= (crtc->primary->fb->pitches[0] >> 7) & 0x10;
- tmp |= (crtc->primary->fb->pitches[0] >> 6) & 0x40;
+ tmp |= (fb->pitches[0] >> 7) & 0x10;
+ tmp |= (fb->pitches[0] >> 6) & 0x40;
WREG_CRT(0x1b, tmp);
/* Enable high-colour modes */
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index d6da848f7c6f..f53aa8f4a143 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -236,8 +236,6 @@ struct ttm_bo_driver cirrus_bo_driver = {
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int cirrus_mm_init(struct cirrus_device *cirrus)
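The cirrus conversion above is part of the core format-handling rework: the duplicated bits_per_pixel/depth/pixel_format fields give way to the struct drm_format_info attached to the framebuffer. The equivalences used in the hunks above, for the packed single-plane formats cirrus supports (cpp[0] being bytes per pixel of plane 0):

	int bpp    = fb->format->cpp[0] * 8;	/* was fb->bits_per_pixel */
	int depth  = fb->format->depth;		/* was fb->depth */
	u32 fourcc = fb->format->format;	/* was fb->pixel_format */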
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index d621c8a4cf00..c89953449e96 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -421,6 +421,8 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
head->base = head->agp_info.aper_base;
return head;
}
+/* Only exported for i810.ko */
+EXPORT_SYMBOL(drm_agp_init);
/**
* drm_legacy_agp_clear - Clear AGP resource list
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index fdfb1ec17e66..a5673107db26 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -35,19 +35,14 @@
#include "drm_crtc_internal.h"
-static void crtc_commit_free(struct kref *kref)
+void __drm_crtc_commit_free(struct kref *kref)
{
struct drm_crtc_commit *commit =
container_of(kref, struct drm_crtc_commit, ref);
kfree(commit);
}
-
-void drm_crtc_commit_put(struct drm_crtc_commit *commit)
-{
- kref_put(&commit->ref, crtc_commit_free);
-}
-EXPORT_SYMBOL(drm_crtc_commit_put);
+EXPORT_SYMBOL(__drm_crtc_commit_free);
/**
* drm_atomic_state_default_release -
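The drm_crtc_commit_put() wrapper disappears from drm_atomic.c and the kref release helper is exported in its place; the companion header change is not part of this hunk, but the put side presumably becomes a trivial inline on top of the exported symbol, along these lines:

	static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit)
	{
		kref_put(&commit->ref, __drm_crtc_commit_free);
	}

Inlining the kref_put() lets the compiler drop a function call on the common put path while keeping the actual free routine out of line.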
@@ -200,8 +195,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_clear);
* all locks. So someone else could sneak in and change the current modeset
* configuration. Which means that all the state assembled in @state is no
* longer an atomic update to the current state, but to some arbitrary earlier
- * state. Which could break assumptions the driver's ->atomic_check likely
- * relies on.
+ * state. Which could break assumptions the driver's
+ * &drm_mode_config_funcs.atomic_check likely relies on.
*
* Hence we must clear all cached state and completely start over, using this
* function.
@@ -312,9 +307,8 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
* @state: the CRTC whose incoming state to update
* @mode: kernel-internal mode to use for the CRTC, or NULL to disable
*
- * Set a mode (originating from the kernel) on the desired CRTC state. Does
- * not change any other state properties, including enable, active, or
- * mode_changed.
+ * Set a mode (originating from the kernel) on the desired CRTC state and update
+ * the enable property.
*
* RETURNS:
* Zero on success, error code on failure. Cannot return -EDEADLK.
@@ -461,11 +455,10 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
* @property: the property to set
* @val: the new property value
*
- * Use this instead of calling crtc->atomic_set_property directly.
- * This function handles generic/core properties and calls out to
- * driver's ->atomic_set_property() for driver properties. To ensure
- * consistent behavior you must call this function rather than the
- * driver hook directly.
+ * This function handles generic/core properties and calls out to the driver's
+ * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
+ * consistent behavior you must call this function rather than the driver hook
+ * directly.
*
* RETURNS:
* Zero on success, error code on failure
@@ -537,10 +530,10 @@ EXPORT_SYMBOL(drm_atomic_crtc_set_property);
* @property: the property to set
* @val: return location for the property value
*
- * This function handles generic/core properties and calls out to
- * driver's ->atomic_get_property() for driver properties. To ensure
- * consistent behavior you must call this function rather than the
- * driver hook directly.
+ * This function handles generic/core properties and calls out to the driver's
+ * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
+ * consistent behavior you must call this function rather than the driver hook
+ * directly.
*
* RETURNS:
* Zero on success, error code on failure
@@ -721,11 +714,10 @@ EXPORT_SYMBOL(drm_atomic_get_plane_state);
* @property: the property to set
* @val: the new property value
*
- * Use this instead of calling plane->atomic_set_property directly.
- * This function handles generic/core properties and calls out to
- * driver's ->atomic_set_property() for driver properties. To ensure
- * consistent behavior you must call this function rather than the
- * driver hook directly.
+ * This function handles generic/core properties and calls out to the driver's
+ * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
+ * consistent behavior you must call this function rather than the driver hook
+ * directly.
*
* RETURNS:
* Zero on success, error code on failure
@@ -796,10 +788,10 @@ EXPORT_SYMBOL(drm_atomic_plane_set_property);
* @property: the property to set
* @val: return location for the property value
*
- * This function handles generic/core properties and calls out to
- * driver's ->atomic_get_property() for driver properties. To ensure
- * consistent behavior you must call this function rather than the
- * driver hook directly.
+ * This function handles generic/core properties and calls out to the driver's
+ * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
+ * consistent behavior you must call this function rather than the driver hook
+ * directly.
*
* RETURNS:
* Zero on success, error code on failure
@@ -902,11 +894,11 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
}
/* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
+ ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
- drm_get_format_name(state->fb->pixel_format,
+ drm_get_format_name(state->fb->format->format,
&format_name));
return ret;
}
@@ -960,11 +952,11 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
if (state->fb) {
struct drm_framebuffer *fb = state->fb;
- int i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = fb->format->num_planes;
struct drm_format_name_buf format_name;
drm_printf(p, "\t\tformat=%s\n",
- drm_get_format_name(fb->pixel_format, &format_name));
+ drm_get_format_name(fb->format->format, &format_name));
drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
drm_printf(p, "\t\tlayers:\n");
@@ -1062,11 +1054,10 @@ EXPORT_SYMBOL(drm_atomic_get_connector_state);
* @property: the property to set
* @val: the new property value
*
- * Use this instead of calling connector->atomic_set_property directly.
- * This function handles generic/core properties and calls out to
- * driver's ->atomic_set_property() for driver properties. To ensure
- * consistent behavior you must call this function rather than the
- * driver hook directly.
+ * This function handles generic/core properties and calls out to the driver's
+ * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
+ * consistent behavior you must call this function rather than the driver hook
+ * directly.
*
* RETURNS:
* Zero on success, error code on failure
@@ -1141,10 +1132,10 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
* @property: the property to set
* @val: return location for the property value
*
- * This function handles generic/core properties and calls out to
- * driver's ->atomic_get_property() for driver properties. To ensure
- * consistent behavior you must call this function rather than the
- * driver hook directly.
+ * This function handles generic/core properties and calls out to the driver's
+ * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
+ * consistent behavior you must call this function rather than the driver hook
+ * directly.
*
* RETURNS:
* Zero on success, error code on failure
@@ -1317,12 +1308,11 @@ EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
* implicit or explicit fencing.
*
* This function will not set the fence to the state if it was set
- * via explicit fencing interfaces on the atomic ioctl. It will
- * all drope the reference to the fence as we not storing it
- * anywhere.
- *
- * Otherwise, if plane_state->fence is not set this function we
- * just set it with the received implict fence.
+ * via explicit fencing interfaces on the atomic ioctl. In that case it will
+ * drop the reference to the fence as we are not storing it anywhere.
+ * state. Otherwise, if &drm_plane_state.fence is not set, this function just sets it
+ * with the received implicit fence. In both cases this function consumes a
+ * reference for @fence.
*/
void
drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
@@ -1417,6 +1407,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
+ struct drm_connector_list_iter conn_iter;
int ret;
ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
@@ -1430,14 +1421,18 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
* Changed connectors are already in @state, so only need to look at the
* current configuration.
*/
- drm_for_each_connector(connector, state->dev) {
+ drm_connector_list_iter_get(state->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->crtc != crtc)
continue;
conn_state = drm_atomic_get_connector_state(state, connector);
- if (IS_ERR(conn_state))
+ if (IS_ERR(conn_state)) {
+ drm_connector_list_iter_put(&conn_iter);
return PTR_ERR(conn_state);
+ }
}
+ drm_connector_list_iter_put(&conn_iter);
return 0;
}
@@ -1594,10 +1589,8 @@ EXPORT_SYMBOL(drm_atomic_check_only);
* more locks but encountered a deadlock. The caller must then do the usual w/w
* backoff dance and restart. All other errors are fatal.
*
- * Also note that on successful execution ownership of @state is transferred
- * from the caller of this function to the function itself. The caller must not
- * free or in any other way access @state. If the function fails then the caller
- * must clean up @state itself.
+ * This function will take its own reference on @state.
+ * Callers should always release their reference with drm_atomic_state_put().
*
* Returns:
* 0 on success, negative error code on failure.
@@ -1618,17 +1611,15 @@ int drm_atomic_commit(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_atomic_commit);
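A minimal sketch of the new reference pattern, assuming @dev and @ctx come from the caller and with the actual state construction elided:

    static int my_commit_new_state(struct drm_device *dev,
                                   struct drm_modeset_acquire_ctx *ctx)
    {
            struct drm_atomic_state *state;
            int ret;

            state = drm_atomic_state_alloc(dev);
            if (!state)
                    return -ENOMEM;
            state->acquire_ctx = ctx;

            /* ... add object states and set properties here ... */

            ret = drm_atomic_commit(state);

            /* drop our reference whether the commit succeeded or not */
            drm_atomic_state_put(state);
            return ret;
    }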
/**
- * drm_atomic_nonblocking_commit - atomic&nonblocking configuration commit
+ * drm_atomic_nonblocking_commit - atomic nonblocking commit
* @state: atomic configuration to check
*
* Note that this function can return -EDEADLK if the driver needed to acquire
* more locks but encountered a deadlock. The caller must then do the usual w/w
* backoff dance and restart. All other errors are fatal.
*
- * Also note that on successful execution ownership of @state is transferred
- * from the caller of this function to the function itself. The caller must not
- * free or in any other way access @state. If the function fails then the caller
- * must clean up @state itself.
+ * This function will take its own reference on @state.
+ * Callers should always release their reference with drm_atomic_state_put().
*
* Returns:
* 0 on success, negative error code on failure.
@@ -1692,6 +1683,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return;
@@ -1702,8 +1694,10 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
list_for_each_entry(crtc, &config->crtc_list, head)
drm_atomic_crtc_print_state(p, crtc->state);
- list_for_each_entry(connector, &config->connector_list, head)
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
drm_atomic_connector_print_state(p, connector->state);
+ drm_connector_list_iter_put(&conn_iter);
}
EXPORT_SYMBOL(drm_state_dump);
@@ -1732,13 +1726,6 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
ARRAY_SIZE(drm_atomic_debugfs_list),
minor->debugfs_root, minor);
}
-
-int drm_atomic_debugfs_cleanup(struct drm_minor *minor)
-{
- return drm_debugfs_remove_files(drm_atomic_debugfs_list,
- ARRAY_SIZE(drm_atomic_debugfs_list),
- minor);
-}
#endif
/*
@@ -1830,10 +1817,10 @@ static int atomic_set_prop(struct drm_atomic_state *state,
* @plane_mask: plane mask for planes that were updated.
* @ret: return value, can be -EDEADLK for a retry.
*
- * Before doing an update plane->old_fb is set to plane->fb,
- * but before dropping the locks old_fb needs to be set to NULL
- * and plane->fb updated. This is a common operation for each
- * atomic update, so this call is split off as a helper.
+ * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
+ * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
+ * is a common operation for each atomic update, so this call is split off as a
+ * helper.
*/
void drm_atomic_clean_old_fb(struct drm_device *dev,
unsigned plane_mask,
@@ -1874,7 +1861,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
* As a contrast, with implicit fencing the kernel keeps track of any
* ongoing rendering, and automatically ensures that the atomic update waits
* for any pending rendering to complete. For shared buffers represented with
- * a struct &dma_buf this is tracked in &reservation_object structures.
+ * a &struct dma_buf this is tracked in &struct reservation_object.
* Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
* whereas explicit fencing is what Android wants.
*
@@ -1890,7 +1877,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
* it will only check if the Sync File is a valid one.
*
* On the driver side the fence is stored on the @fence parameter of
- * struct &drm_plane_state. Drivers which also support implicit fencing
+ * &struct drm_plane_state. Drivers which also support implicit fencing
* should set the implicit fence using drm_atomic_set_fence_for_plane(),
* to make sure there's consistent behaviour between drivers in precedence
* of implicit vs. explicit fencing.
@@ -1909,7 +1896,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
* DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
*
* Note that out-fences don't have a special interface to drivers and are
- * internally represented by a struct &drm_pending_vblank_event in struct
+ * internally represented by a &struct drm_pending_vblank_event in struct
* &drm_crtc_state, which is also used by the nonblocking atomic commit
* helpers and for the DRM event handling for existing userspace.
*/
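As a rough sketch of the driver side described above, a &drm_plane_helper_funcs.prepare_fb hook can fish the implicit fence out of the backing &dma_buf; my_fb_get_dma_buf() is a hypothetical driver helper:

    static int my_plane_prepare_fb(struct drm_plane *plane,
                                   struct drm_plane_state *state)
    {
            struct dma_buf *dma_buf;
            struct dma_fence *fence;

            if (!state->fb)
                    return 0;

            dma_buf = my_fb_get_dma_buf(state->fb); /* hypothetical */
            if (dma_buf) {
                    fence = reservation_object_get_excl_rcu(dma_buf->resv);
                    /* keeps an explicit IN_FENCE_FD fence if one was set */
                    drm_atomic_set_fence_for_plane(state, fence);
            }

            return 0;
    }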
@@ -2198,10 +2185,6 @@ retry:
goto out;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
- /*
- * Unlike commit, check_only does not clean up state.
- * Below we call drm_atomic_state_put for it.
- */
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4594477dee00..01d936b7be43 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -56,9 +56,9 @@
* implement these functions themselves but must use the provided helpers.
*
* The atomic helper uses the same function table structures as all other
- * modesetting helpers. See the documentation for struct &drm_crtc_helper_funcs,
- * struct &drm_encoder_helper_funcs and struct &drm_connector_helper_funcs. It
- * also shares the struct &drm_plane_helper_funcs function table with the plane
+ * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
+ * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
+ * also shares the &struct drm_plane_helper_funcs function table with the plane
* helpers.
*/
static void
@@ -94,9 +94,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
{
struct drm_connector_state *conn_state;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
struct drm_encoder *encoder;
unsigned encoder_mask = 0;
- int i, ret;
+ int i, ret = 0;
/*
* First loop, find all newly assigned encoders from the connectors
@@ -144,7 +145,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
* and the crtc is disabled if no encoder is left. This preserves
* compatibility with the legacy set_config behavior.
*/
- drm_for_each_connector(connector, state->dev) {
+ drm_connector_list_iter_get(state->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_crtc_state *crtc_state;
if (drm_atomic_get_existing_connector_state(state, connector))
@@ -160,12 +162,15 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
connector->state->crtc->base.id,
connector->state->crtc->name,
connector->base.id, connector->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
conn_state = drm_atomic_get_connector_state(state, connector);
- if (IS_ERR(conn_state))
- return PTR_ERR(conn_state);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ goto out;
+ }
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
encoder->base.id, encoder->name,
@@ -176,19 +181,21 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret)
- return ret;
+ goto out;
if (!crtc_state->connector_mask) {
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
NULL);
if (ret < 0)
- return ret;
+ goto out;
crtc_state->active = false;
}
}
+out:
+ drm_connector_list_iter_put(&conn_iter);
- return 0;
+ return ret;
}
static void
@@ -362,7 +369,7 @@ mode_fixup(struct drm_atomic_state *state)
struct drm_connector *connector;
struct drm_connector_state *conn_state;
int i;
- bool ret;
+ int ret;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->mode_changed &&
@@ -451,22 +458,25 @@ mode_fixup(struct drm_atomic_state *state)
* Check the state object to see if the requested state is physically possible.
* This does all the crtc and connector related computations for an atomic
* update and adds any additional connectors needed for full modesets and calls
- * down into ->mode_fixup functions of the driver backend.
- *
- * crtc_state->mode_changed is set when the input mode is changed.
- * crtc_state->connectors_changed is set when a connector is added or
- * removed from the crtc.
- * crtc_state->active_changed is set when crtc_state->active changes,
- * which is used for dpms.
+ * down into &drm_crtc_helper_funcs.mode_fixup and
+ * &drm_encoder_helper_funcs.mode_fixup or
+ * &drm_encoder_helper_funcs.atomic_check functions of the driver backend.
+ *
+ * &drm_crtc_state.mode_changed is set when the input mode is changed.
+ * &drm_crtc_state.connectors_changed is set when a connector is added or
+ * removed from the crtc. &drm_crtc_state.active_changed is set when
+ * &drm_crtc_state.active changes, which is used for DPMS.
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
*
- * Drivers which set ->mode_changed (e.g. in their ->atomic_check hooks if a
- * plane update can't be done without a full modeset) _must_ call this function
- * afterwards after that change. It is permitted to call this function multiple
- * times for the same update, e.g. when the ->atomic_check functions depend upon
- * the adjusted dotclock for fifo space allocation and watermark computation.
+ * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
+ * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
+ * without a full modeset) _must_ call this function after that change. It is
+ * permitted to call this function multiple times for the same
+ * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
+ * upon the adjusted dotclock for fifo space allocation and watermark
+ * computation.
*
* RETURNS:
* Zero for success or -errno
@@ -577,9 +587,10 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
*
* Check the state object to see if the requested state is physically possible.
* This does all the plane update related checks using by calling into the
- * ->atomic_check hooks provided by the driver.
+ * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
+ * hooks provided by the driver.
*
- * It also sets crtc_state->planes_changed to indicate that a crtc has
+ * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
* updated planes.
*
* RETURNS:
@@ -641,14 +652,15 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
* Check the state object to see if the requested state is physically possible.
* Only crtcs and planes have check callbacks, so for any additional (global)
* checking that a driver needs it can simply wrap that around this function.
- * Drivers without such needs can directly use this as their ->atomic_check()
- * callback.
+ * Drivers without such needs can directly use this as their
+ * &drm_mode_config_funcs.atomic_check callback.
*
* This just wraps the two parts of the state checking for planes and modeset
* state in the default order: First it calls drm_atomic_helper_check_modeset()
* and then drm_atomic_helper_check_planes(). The assumption is that the
- * ->atomic_check functions depend upon an updated adjusted_mode.clock to
- * e.g. properly compute watermarks.
+ * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
+ * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
+ * watermarks.
*
* RETURNS:
* Zero for success or -errno
@@ -1058,41 +1070,6 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
/**
- * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
- * @dev: DRM device
- * @old_state: atomic state object with old state structures
- * @crtc: DRM crtc
- *
- * Checks whether the framebuffer used for this CRTC changes as a result of
- * the atomic update. This is useful for drivers which cannot use
- * drm_atomic_helper_wait_for_vblanks() and need to reimplement its
- * functionality.
- *
- * Returns:
- * true if the framebuffer changed.
- */
-bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
- struct drm_atomic_state *old_state,
- struct drm_crtc *crtc)
-{
- struct drm_plane *plane;
- struct drm_plane_state *old_plane_state;
- int i;
-
- for_each_plane_in_state(old_state, plane, old_plane_state, i) {
- if (plane->state->crtc != crtc &&
- old_plane_state->crtc != crtc)
- continue;
-
- if (plane->state->fb != old_plane_state->fb)
- return true;
- }
-
- return false;
-}
-EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed);
-
-/**
* drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
* @dev: DRM device
* @old_state: atomic state object with old state structures
@@ -1110,39 +1087,35 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i, ret;
+ unsigned crtc_mask = 0;
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- /* No one cares about the old state, so abuse it for tracking
- * and store whether we hold a vblank reference (and should do a
- * vblank wait) in the ->enable boolean. */
- old_crtc_state->enable = false;
-
- if (!crtc->state->enable)
- continue;
+ /*
+ * Legacy cursor ioctls are completely unsynced, and userspace
+ * relies on that (by doing tons of cursor updates).
+ */
+ if (old_state->legacy_cursor_update)
+ return;
- /* Legacy cursor ioctls are completely unsynced, and userspace
- * relies on that (by doing tons of cursor updates). */
- if (old_state->legacy_cursor_update)
- continue;
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct drm_crtc_state *new_crtc_state = crtc->state;
- if (!drm_atomic_helper_framebuffer_changed(dev,
- old_state, crtc))
+ if (!new_crtc_state->active || !new_crtc_state->planes_changed)
continue;
ret = drm_crtc_vblank_get(crtc);
if (ret != 0)
continue;
- old_crtc_state->enable = true;
- old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc);
+ crtc_mask |= drm_crtc_mask(crtc);
+ old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
}
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- if (!old_crtc_state->enable)
+ if (!(crtc_mask & drm_crtc_mask(crtc)))
continue;
ret = wait_event_timeout(dev->vblank[i].queue,
- old_crtc_state->last_vblank_count !=
+ old_state->crtcs[i].last_vblank_count !=
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
@@ -1157,8 +1130,8 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
* drm_atomic_helper_commit_tail - commit atomic update to hardware
* @old_state: atomic state object with old state structures
*
- * This is the default implemenation for the ->atomic_commit_tail() hook of the
- * &drm_mode_config_helper_funcs vtable.
+ * This is the default implementation for the
+ * &drm_mode_config_helper_funcs.atomic_commit_tail hook.
*
* Note that the default ordering of how the various stages are called is to
* match the legacy modeset helper library closest. One peculiarity of that is
@@ -1235,8 +1208,8 @@ static void commit_work(struct work_struct *work)
* drm_atomic_helper_setup_commit() and related functions.
*
* Committing the actual hardware state is done through the
- * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
- * or it's default implementation drm_atomic_helper_commit_tail().
+ * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
+ * implementation drm_atomic_helper_commit_tail().
*
* RETURNS:
* Zero for success or -errno.
@@ -1389,6 +1362,15 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
return ret < 0 ? ret : 0;
}
+static void release_crtc_commit(struct completion *completion)
+{
+ struct drm_crtc_commit *commit = container_of(completion,
+ typeof(*commit),
+ flip_done);
+
+ drm_crtc_commit_put(commit);
+}
+
/**
* drm_atomic_helper_setup_commit - setup possibly nonblocking commit
* @state: new modeset state to be committed
@@ -1396,14 +1378,15 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
*
* This function prepares @state to be used by the atomic helper's support for
* nonblocking commits. Drivers using the nonblocking commit infrastructure
- * should always call this function from their ->atomic_commit hook.
+ * should always call this function from their
+ * &drm_mode_config_funcs.atomic_commit hook.
*
* To be able to use this support drivers need to use a few more helper
* functions. drm_atomic_helper_wait_for_dependencies() must be called before
* actually committing the hardware state, and for nonblocking commits this call
* must be placed in the async worker. See also drm_atomic_helper_swap_state()
* and it's stall parameter, for when a driver's commit hooks look at the
- * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly.
+ * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
*
* Completion of the hardware commit step must be signalled using
* drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
@@ -1481,6 +1464,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
}
crtc_state->event->base.completion = &commit->flip_done;
+ crtc_state->event->base.completion_release = release_crtc_commit;
+ drm_crtc_commit_get(commit);
}
return 0;
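A rough sketch of a driver's &drm_mode_config_funcs.atomic_commit built on this infrastructure; my_commit_tail() and my_queue_commit_work() are hypothetical, and dependency waiting plus error unwinding are elided:

    static int my_atomic_commit(struct drm_device *dev,
                                struct drm_atomic_state *state, bool nonblock)
    {
            int ret;

            ret = drm_atomic_helper_setup_commit(state, nonblock);
            if (ret)
                    return ret;

            ret = drm_atomic_helper_prepare_planes(dev, state);
            if (ret)
                    return ret;

            drm_atomic_helper_swap_state(state, true);

            drm_atomic_state_get(state); /* reference for the commit work */
            if (nonblock)
                    my_queue_commit_work(state);
            else
                    my_commit_tail(state);

            return 0;
    }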
@@ -1510,8 +1495,7 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
* This function waits for all preceeding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
- * by calling drm_crtc_vblank_send_event on the event member of
- * &drm_crtc_state).
+ * by calling drm_crtc_vblank_send_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@@ -1648,8 +1632,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
* @state: atomic state object with new state structures
*
* This function prepares plane state, specifically framebuffers, for the new
- * configuration. If any failure is encountered this function will call
- * ->cleanup_fb on any already successfully prepared framebuffer.
+ * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
+ * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
+ * any already successfully prepared framebuffer.
*
* Returns:
* 0 on success, negative error code on failure.
@@ -1729,10 +1714,10 @@ static bool plane_crtc_active(const struct drm_plane_state *state)
*
* Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
* display controllers require to disable a CRTC's planes when the CRTC is
- * disabled. This function would skip the ->atomic_disable call for a plane if
- * the CRTC of the old plane state needs a modesetting operation. Of course,
- * the drivers need to disable the planes in their CRTC disable callbacks
- * since no one else would do that.
+ * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
+ * call for a plane if the CRTC of the old plane state needs a modesetting
+ * operation. Of course, the drivers need to disable the planes in their CRTC
+ * disable callbacks since no one else would do that.
*
* The drm_atomic_helper_commit() default implementation doesn't set the
* ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
@@ -1895,7 +1880,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
* planes.
*
* It is a bug to call this function without having implemented the
- * ->atomic_disable() plane hook.
+ * &drm_plane_helper_funcs.atomic_disable plane hook.
*/
void
drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
@@ -1982,8 +1967,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
* contains the old state. Also do any other cleanup required with that state.
*
* @stall must be set when nonblocking commits for this driver directly access
- * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the
- * current atomic helpers this is almost always the case, since the helpers
+ * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
+ * the current atomic helpers this is almost always the case, since the helpers
* don't pass the right state structures to the callbacks.
*/
void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
@@ -2384,7 +2369,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
if (ret != 0)
return ret;
- drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+ drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);
drm_atomic_set_fb_for_plane(primary_state, set->fb);
primary_state->crtc_x = 0;
@@ -2435,6 +2420,7 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
{
struct drm_atomic_state *state;
struct drm_connector *conn;
+ struct drm_connector_list_iter conn_iter;
int err;
state = drm_atomic_state_alloc(dev);
@@ -2443,7 +2429,8 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
state->acquire_ctx = ctx;
- drm_for_each_connector(conn, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_crtc *crtc = conn->state->crtc;
struct drm_crtc_state *crtc_state;
@@ -2461,6 +2448,7 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
err = drm_atomic_commit(state);
free:
+ drm_connector_list_iter_put(&conn_iter);
drm_atomic_state_put(state);
return err;
}
@@ -2726,6 +2714,44 @@ backoff:
}
EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
+static int page_flip_common(
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_plane *plane = crtc->primary;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->event = event;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ return ret;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+
+ /* Make sure we don't accidentally do a full modeset. */
+ state->allow_modeset = false;
+ if (!crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
+ crtc->base.id);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
/**
* drm_atomic_helper_page_flip - execute a legacy page flip
* @crtc: DRM crtc
@@ -2733,7 +2759,8 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
* @event: optional DRM event to signal upon completion
* @flags: flip flags for non-vblank sync'ed updates
*
- * Provides a default page flip implementation using the atomic driver interface.
+ * Provides a default &drm_crtc_funcs.page_flip implementation
+ * using the atomic driver interface.
*
* Note that for now so called async page flips (i.e. updates which are not
* synchronized to vblank) are not supported, since the atomic interfaces have
@@ -2741,6 +2768,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
+ *
+ * See also:
+ * drm_atomic_helper_page_flip_target()
*/
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -2749,8 +2779,6 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
{
struct drm_plane *plane = crtc->primary;
struct drm_atomic_state *state;
- struct drm_plane_state *plane_state;
- struct drm_crtc_state *crtc_state;
int ret = 0;
if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
@@ -2761,35 +2789,86 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
return -ENOMEM;
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
retry:
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
+ ret = page_flip_common(state, crtc, fb, event);
+ if (ret != 0)
goto fail;
- }
- crtc_state->event = event;
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- ret = PTR_ERR(plane_state);
- goto fail;
- }
+ ret = drm_atomic_nonblocking_commit(state);
- ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_put(state);
+ return ret;
+
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ /*
+ * Someone might have exchanged the framebuffer while we dropped locks
+ * in the backoff code. We need to fix up the fb refcount tracking the
+ * core does for us.
+ */
+ plane->old_fb = plane->fb;
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_page_flip);
+
+/**
+ * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
+ * @crtc: DRM crtc
+ * @fb: DRM framebuffer
+ * @event: optional DRM event to signal upon completion
+ * @flags: flip flags for non-vblank sync'ed updates
+ * @target: the target vblank period when the flip should take effect
+ *
+ * Provides a default &drm_crtc_funcs.page_flip_target implementation.
+ * Similar to drm_atomic_helper_page_flip(), but with an extra parameter
+ * specifying the target vblank period when the flip should take effect.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_page_flip_target(
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags,
+ uint32_t target)
+{
+ struct drm_plane *plane = crtc->primary;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ return -EINVAL;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+ ret = page_flip_common(state, crtc, fb, event);
if (ret != 0)
goto fail;
- drm_atomic_set_fb_for_plane(plane_state, fb);
- /* Make sure we don't accidentally do a full modeset. */
- state->allow_modeset = false;
- if (!crtc_state->active) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
- crtc->base.id);
+ crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+ if (WARN_ON(!crtc_state)) {
ret = -EINVAL;
goto fail;
}
+ crtc_state->target_vblank = target;
ret = drm_atomic_nonblocking_commit(state);
+
fail:
if (ret == -EDEADLK)
goto backoff;
@@ -2810,7 +2889,7 @@ backoff:
goto retry;
}
-EXPORT_SYMBOL(drm_atomic_helper_page_flip);
+EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
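A minimal sketch of wiring both page flip helpers into a driver's CRTC funcs, assuming the usual atomic helper defaults for the rest:

    static const struct drm_crtc_funcs my_crtc_funcs = {
            .set_config = drm_atomic_helper_set_config,
            .page_flip = drm_atomic_helper_page_flip,
            .page_flip_target = drm_atomic_helper_page_flip_target,
            .reset = drm_atomic_helper_crtc_reset,
            .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
            .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
            /* .destroy and the remaining vfuncs omitted for brevity */
    };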
/**
* drm_atomic_helper_connector_dpms() - connector dpms helper implementation
@@ -2819,8 +2898,8 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
*
* This is the main helper function provided by the atomic helper framework for
* implementing the legacy DPMS connector interface. It computes the new desired
- * ->active state for the corresponding CRTC (if the connector is enabled) and
- * updates it.
+ * &drm_crtc_state.active state for the corresponding CRTC (if the connector is
+ * enabled) and updates it.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
@@ -2833,6 +2912,7 @@ int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
struct drm_connector *tmp_connector;
+ struct drm_connector_list_iter conn_iter;
int ret;
bool active = false;
int old_mode = connector->dpms;
@@ -2860,7 +2940,8 @@ retry:
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- drm_for_each_connector(tmp_connector, connector->dev) {
+ drm_connector_list_iter_get(connector->dev, &conn_iter);
+ drm_for_each_connector_iter(tmp_connector, &conn_iter) {
if (tmp_connector->state->crtc != crtc)
continue;
@@ -2869,6 +2950,7 @@ retry:
break;
}
}
+ drm_connector_list_iter_put(&conn_iter);
crtc_state->active = active;
ret = drm_atomic_commit(state);
@@ -2889,11 +2971,11 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
/**
- * drm_atomic_helper_best_encoder - Helper for &drm_connector_helper_funcs
- * ->best_encoder callback
+ * drm_atomic_helper_best_encoder - Helper for
+ * &drm_connector_helper_funcs.best_encoder callback
* @connector: Connector control structure
*
- * This is a &drm_connector_helper_funcs ->best_encoder callback helper for
+ * This is a &drm_connector_helper_funcs.best_encoder callback helper for
* connectors that support exactly 1 encoder, statically determined at driver
* init time.
*/
@@ -2927,7 +3009,7 @@ EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
*/
/**
- * drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
+ * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
* @crtc: drm CRTC
*
* Resets the atomic state for @crtc by freeing the state pointer (which might
@@ -3034,7 +3116,7 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
/**
- * drm_atomic_helper_plane_reset - default ->reset hook for planes
+ * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
* @plane: drm plane
*
* Resets the atomic state for @plane by freeing the state pointer (which might
@@ -3138,8 +3220,9 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
* @conn_state: connector state to assign
*
* Initializes the newly allocated @conn_state and assigns it to
- * #connector ->state, usually required when initializing the drivers
- * or when called from the ->reset hook.
+ * the &drm_connector.state pointer of @connector, usually required when
+ * initializing drivers or when called from the &drm_connector_funcs.reset
+ * hook.
*
* This is useful for drivers that subclass the connector state.
*/
@@ -3155,7 +3238,7 @@ __drm_atomic_helper_connector_reset(struct drm_connector *connector,
EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
/**
- * drm_atomic_helper_connector_reset - default ->reset hook for connectors
+ * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
* @connector: drm connector
*
* Resets the atomic state for @connector by freeing the state pointer (which
@@ -3246,6 +3329,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
{
struct drm_atomic_state *state;
struct drm_connector *conn;
+ struct drm_connector_list_iter conn_iter;
struct drm_plane *plane;
struct drm_crtc *crtc;
int err = 0;
@@ -3276,15 +3360,18 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
}
}
- drm_for_each_connector(conn, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
conn_state = drm_atomic_get_connector_state(state, conn);
if (IS_ERR(conn_state)) {
err = PTR_ERR(conn_state);
+ drm_connector_list_iter_put(&conn_iter);
goto free;
}
}
+ drm_connector_list_iter_put(&conn_iter);
/* clear the acquire context so that it isn't accidentally reused */
state->acquire_ctx = NULL;
@@ -3310,11 +3397,6 @@ EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
void
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
{
- /*
- * This is currently a placeholder so that drivers that subclass the
- * state will automatically do the right thing if code is ever added
- * to this function.
- */
if (state->crtc)
drm_connector_unreference(state->connector);
}
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 6b143514a566..7ff697389d74 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -35,13 +35,13 @@
/**
* DOC: master and authentication
*
- * struct &drm_master is used to track groups of clients with open
- * primary/legacy device nodes. For every struct &drm_file which has had at
+ * &struct drm_master is used to track groups of clients with open
+ * primary/legacy device nodes. For every &struct drm_file which has had at
* least once successfully became the device master (either through the
* SET_MASTER IOCTL, or implicitly through opening the primary device node when
* no one else is the current master that time) there exists one &drm_master.
- * This is noted in the is_master member of &drm_file. All other clients have
- * just a pointer to the &drm_master they are associated with.
+ * This is noted in &drm_file.is_master. All other clients have just a pointer
+ * to the &drm_master they are associated with.
*
* In addition only one &drm_master can be the current master for a &drm_device.
* It can be switched through the DROP_MASTER and SET_MASTER IOCTL, or
@@ -294,7 +294,7 @@ EXPORT_SYMBOL(drm_is_current_master);
/**
* drm_master_get - reference a master pointer
- * @master: struct &drm_master
+ * @master: &struct drm_master
*
* Increments the reference count of @master and returns a pointer to @master.
*/
@@ -322,7 +322,7 @@ static void drm_master_destroy(struct kref *kref)
/**
* drm_master_put - unreference and clear a master pointer
- * @master: pointer to a pointer of struct &drm_master
+ * @master: pointer to a pointer of &struct drm_master
*
* This decrements the &drm_master behind @master and sets it to NULL.
*/
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 1f2412c7ccfd..665aafc6ad68 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -40,9 +40,8 @@
* sub-pixel accuracy, which is scaled up to a pixel-aligned destination
* rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is
* defined by the horizontal and vertical visible pixels (stored in @hdisplay
- * and @vdisplay) of the requested mode (stored in @mode in the
- * &drm_crtc_state). These two rectangles are both stored in the
- * &drm_plane_state.
+ * and @vdisplay) of the requested mode (stored in &drm_crtc_state.mode). These
+ * two rectangles are both stored in the &drm_plane_state.
*
* For the atomic ioctl the following standard (atomic) properties on the plane object
* encode the basic plane composition model:
@@ -215,7 +214,7 @@ EXPORT_SYMBOL(drm_rotation_simplify);
* for it in drm core. Drivers can then attach this property to planes to enable
* support for configurable planes arrangement during blending operation.
* Once mutable zpos property has been enabled, the DRM core will automatically
- * calculate drm_plane_state->normalized_zpos values. Usually min should be set
+ * calculate &drm_plane_state.normalized_zpos values. Usually min should be set
* to 0 and max to maximal number of planes for given crtc - 1.
*
* If zpos of some planes cannot be changed (like fixed background or
@@ -367,8 +366,8 @@ done:
* For every CRTC this function checks new states of all planes assigned to
* it and calculates normalized zpos value for these planes. Planes are compared
* first by their zpos values, then by plane id (if zpos is equal). The plane
- * with lowest zpos value is at the bottom. The plane_state->normalized_zpos is
- * then filled with unique values from 0 to number of active planes in crtc
+ * with lowest zpos value is at the bottom. The &drm_plane_state.normalized_zpos
+ * is then filled with unique values from 0 to number of active planes in crtc
* minus one.
*
* RETURNS
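A minimal sketch of where the normalization is typically triggered, as a driver's &drm_mode_config_funcs.atomic_check:

    static int my_atomic_check(struct drm_device *dev,
                               struct drm_atomic_state *state)
    {
            int ret;

            ret = drm_atomic_normalize_zpos(dev, state);
            if (ret)
                    return ret;

            return drm_atomic_helper_check(dev, state);
    }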
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 0ee052b7c21a..86a7637ba344 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -26,11 +26,14 @@
#include <linux/mutex.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_encoder.h>
+
+#include "drm_crtc_internal.h"
/**
* DOC: overview
*
- * struct &drm_bridge represents a device that hangs on to an encoder. These are
+ * &struct drm_bridge represents a device that hangs on to an encoder. These are
* handy when a regular &drm_encoder entity isn't enough to represent the entire
* encoder chain.
*
@@ -52,7 +55,7 @@
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
*
- * Bridges can also be chained up using the next pointer in struct &drm_bridge.
+ * Bridges can also be chained up using the &drm_bridge.next pointer.
*
* Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
@@ -92,47 +95,58 @@ void drm_bridge_remove(struct drm_bridge *bridge)
EXPORT_SYMBOL(drm_bridge_remove);
/**
- * drm_bridge_attach - associate given bridge to our DRM device
+ * drm_bridge_attach - attach the bridge to an encoder's chain
*
- * @dev: DRM device
- * @bridge: bridge control structure
+ * @encoder: DRM encoder
+ * @bridge: bridge to attach
+ * @previous: previous bridge in the chain (optional)
*
- * Called by a kms driver to link one of our encoder/bridge to the given
- * bridge.
+ * Called by a kms driver to link the bridge to an encoder's chain. The previous
+ * argument specifies the previous bridge in the chain. If NULL, the bridge is
+ * linked directly at the encoder's output. Otherwise it is linked at the
+ * previous bridge's output.
*
- * Note that setting up links between the bridge and our encoder/bridge
- * objects needs to be handled by the kms driver itself.
+ * If non-NULL, the previous bridge must already have been attached by a call
+ * to this function.
*
* RETURNS:
* Zero on success, error code on failure
*/
-int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
+int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
+ struct drm_bridge *previous)
{
- if (!dev || !bridge)
+ int ret;
+
+ if (!encoder || !bridge)
+ return -EINVAL;
+
+ if (previous && (!previous->dev || previous->encoder != encoder))
return -EINVAL;
if (bridge->dev)
return -EBUSY;
- bridge->dev = dev;
+ bridge->dev = encoder->dev;
+ bridge->encoder = encoder;
+
+ if (bridge->funcs->attach) {
+ ret = bridge->funcs->attach(bridge);
+ if (ret < 0) {
+ bridge->dev = NULL;
+ bridge->encoder = NULL;
+ return ret;
+ }
+ }
- if (bridge->funcs->attach)
- return bridge->funcs->attach(bridge);
+ if (previous)
+ previous->next = bridge;
+ else
+ encoder->bridge = bridge;
return 0;
}
EXPORT_SYMBOL(drm_bridge_attach);
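A minimal sketch of the new chaining, assuming bridge_a and bridge_b were already looked up by the driver:

    /* bridge_a is linked directly at the encoder's output */
    ret = drm_bridge_attach(encoder, bridge_a, NULL);
    if (ret)
            return ret;

    /* bridge_b is linked at bridge_a's output */
    ret = drm_bridge_attach(encoder, bridge_b, bridge_a);
    if (ret)
            return ret;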
-/**
- * drm_bridge_detach - deassociate given bridge from its DRM device
- *
- * @bridge: bridge control structure
- *
- * Called by a kms driver to unlink the given bridge from its DRM device.
- *
- * Note that tearing down links between the bridge and our encoder/bridge
- * objects needs to be handled by the kms driver itself.
- */
void drm_bridge_detach(struct drm_bridge *bridge)
{
if (WARN_ON(!bridge))
@@ -146,7 +160,6 @@ void drm_bridge_detach(struct drm_bridge *bridge)
bridge->dev = NULL;
}
-EXPORT_SYMBOL(drm_bridge_detach);
/**
* DOC: bridge callbacks
@@ -166,7 +179,7 @@ EXPORT_SYMBOL(drm_bridge_detach);
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
- * Calls ->mode_fixup() &drm_bridge_funcs op for all the bridges in the
+ * Calls &drm_bridge_funcs.mode_fixup for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
@@ -193,11 +206,10 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_mode_fixup);
/**
- * drm_bridge_disable - calls ->disable() &drm_bridge_funcs op for all
- * bridges in the encoder chain.
+ * drm_bridge_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
*
- * Calls ->disable() &drm_bridge_funcs op for all the bridges in the encoder
+ * Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
* chain, starting from the last bridge to the first. These are called before
* calling the encoder's prepare op.
*
@@ -216,11 +228,10 @@ void drm_bridge_disable(struct drm_bridge *bridge)
EXPORT_SYMBOL(drm_bridge_disable);
/**
- * drm_bridge_post_disable - calls ->post_disable() &drm_bridge_funcs op for
- * all bridges in the encoder chain.
+ * drm_bridge_post_disable - cleans up after disabling all bridges in the encoder chain
* @bridge: bridge control structure
*
- * Calls ->post_disable() &drm_bridge_funcs op for all the bridges in the
+ * Calls &drm_bridge_funcs.post_disable op for all the bridges in the
* encoder chain, starting from the first bridge to the last. These are called
* after completing the encoder's prepare op.
*
@@ -245,7 +256,7 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
- * Calls ->mode_set() &drm_bridge_funcs op for all the bridges in the
+ * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
@@ -265,11 +276,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_mode_set);
/**
- * drm_bridge_pre_enable - calls ->pre_enable() &drm_bridge_funcs op for all
- * bridges in the encoder chain.
+ * drm_bridge_pre_enable - prepares for enabling all
+ * bridges in the encoder chain
* @bridge: bridge control structure
*
- * Calls ->pre_enable() &drm_bridge_funcs op for all the bridges in the encoder
+ * Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
* chain, starting from the last bridge to the first. These are called
* before calling the encoder's commit op.
*
@@ -288,11 +299,10 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)
EXPORT_SYMBOL(drm_bridge_pre_enable);
/**
- * drm_bridge_enable - calls ->enable() &drm_bridge_funcs op for all bridges
- * in the encoder chain.
+ * drm_bridge_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
*
- * Calls ->enable() &drm_bridge_funcs op for all the bridges in the encoder
+ * Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
* chain, starting from the first bridge to the last. These are called
* after completing the encoder's commit op.
*
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index a7916e5f8864..c3b9aaccdf42 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -29,7 +29,9 @@
*/
#include <linux/export.h>
-#include <drm/drmP.h>
+#include <linux/highmem.h>
+
+#include <drm/drm_cache.h>
#if defined(CONFIG_X86)
#include <asm/smp.h>
@@ -67,6 +69,14 @@ static void drm_cache_flush_clflush(struct page *pages[],
}
#endif
+/**
+ * drm_clflush_pages - Flush dcache lines of a set of pages.
+ * @pages: List of pages to be flushed.
+ * @num_pages: Number of pages in the array.
+ *
+ * Flush every data cache line entry that points to an address belonging
+ * to a page in the array.
+ */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
@@ -101,6 +111,13 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
}
EXPORT_SYMBOL(drm_clflush_pages);
+/**
+ * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather list.
+ * @st: struct sg_table.
+ *
+ * Flush every data cache line entry that points to an address in the
+ * sg.
+ */
void
drm_clflush_sg(struct sg_table *st)
{
@@ -125,6 +142,14 @@ drm_clflush_sg(struct sg_table *st)
}
EXPORT_SYMBOL(drm_clflush_sg);
+/**
+ * drm_clflush_virt_range - Flush dcache lines of a region
+ * @addr: Initial kernel memory address.
+ * @length: Region size.
+ *
+ * Flush every data cache line entry that points to an address in the
+ * region requested.
+ */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
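A minimal usage sketch, assuming @vaddr is a kernel mapping of a buffer the CPU just filled and the device will read uncached; my_fill_buffer() is hypothetical:

    my_fill_buffer(vaddr, size);
    drm_clflush_virt_range(vaddr, size); /* push the dirty lines out of cache */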
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 6543ebde501a..cc23b9a505c0 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -36,7 +36,7 @@
* "DEGAMMA_LUT”:
* Blob property to set the degamma lookup table (LUT) mapping pixel data
* from the framebuffer before it is given to the transformation matrix.
- * The data is interpreted as an array of struct &drm_color_lut elements.
+ * The data is interpreted as an array of &struct drm_color_lut elements.
* Hardware might choose not to use the full precision of the LUT elements
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
@@ -65,7 +65,7 @@
* “GAMMA_LUT”:
* Blob property to set the gamma lookup table (LUT) mapping pixel data
* after the transformation matrix to data sent to the connector. The
- * data is interpreted as an array of struct &drm_color_lut elements.
+ * data is interpreted as an array of &struct drm_color_lut elements.
* Hardware might choose not to use the full precision of the LUT elements
* nor use all the elements of the LUT (for example the hardware might
* choose to interpolate between LUT[0] and LUT[4]).
@@ -88,6 +88,30 @@
*/
/**
+ * drm_color_lut_extract - clamp and round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision)
+{
+ uint32_t val = user_input;
+ uint32_t max = 0xffff >> (16 - bit_precision);
+
+ /* Round only if we're not using full precision. */
+ if (bit_precision < 16) {
+ val += 1UL << (16 - bit_precision - 1);
+ val >>= 16 - bit_precision;
+ }
+
+ return clamp_val(val, 0, max);
+}
+EXPORT_SYMBOL(drm_color_lut_extract);
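A worked example for a hypothetical 10-bit hardware LUT, assuming lut points at the &drm_color_lut array from the GAMMA_LUT blob; here max = 0xffff >> 6 = 0x3ff:

    /*
     * drm_color_lut_extract(0x8123, 10):
     *   val = 0x8123 + (1 << 5) = 0x8143  (round at the dropped bits)
     *   val >>= 6                        -> 0x205
     *   clamp_val(0x205, 0, 0x3ff)       == 0x205
     */
    u32 hw_red = drm_color_lut_extract(lut[i].red, 10);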
+
+/**
* drm_crtc_enable_color_mgmt - enable color management properties
* @crtc: DRM CRTC
* @degamma_lut_size: the size of the degamma lut (before CSC)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 7a7019ac9388..45464c8b797d 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -23,6 +23,7 @@
#include <drm/drmP.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -37,18 +38,17 @@
* Hence they are reference-counted using drm_connector_reference() and
* drm_connector_unreference().
*
- * KMS driver must create, initialize, register and attach at a struct
- * &drm_connector for each such sink. The instance is created as other KMS
- * objects and initialized by setting the following fields.
- *
- * The connector is then registered with a call to drm_connector_init() with a
- * pointer to the connector functions and a connector type, and exposed through
- * sysfs with a call to drm_connector_register().
+ * A KMS driver must create, initialize, register and attach a &struct
+ * drm_connector for each such sink. The instance is created like other KMS
+ * objects and initialized by setting the following fields. The connector is
+ * initialized with a call to drm_connector_init() with a pointer to the
+ * &struct drm_connector_funcs and a connector type, and then exposed to
+ * userspace with a call to drm_connector_register().
*
* Connectors must be attached to an encoder to be used. For devices that map
* connectors to encoders 1:1, the connector should be attached at
* initialization time with a call to drm_mode_connector_attach_encoder(). The
- * driver must also set the struct &drm_connector encoder field to point to the
+ * driver must also set the &drm_connector.encoder field to point to the
* attached encoder.
*
* For connectors which are not fixed (like built-in panels) the driver needs to
@@ -189,13 +189,11 @@ int drm_connector_init(struct drm_device *dev,
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
- drm_modeset_lock_all(dev);
-
ret = drm_mode_object_get_reg(dev, &connector->base,
DRM_MODE_OBJECT_CONNECTOR,
false, drm_connector_free);
if (ret)
- goto out_unlock;
+ return ret;
connector->base.properties = &connector->properties;
connector->dev = dev;
@@ -233,8 +231,10 @@ int drm_connector_init(struct drm_device *dev,
/* We should add connectors at the end to avoid upsetting the connector
* index too much. */
+ spin_lock_irq(&config->connector_list_lock);
list_add_tail(&connector->head, &config->connector_list);
config->num_connector++;
+ spin_unlock_irq(&config->connector_list_lock);
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
drm_object_attach_property(&connector->base,
@@ -259,9 +259,6 @@ out_put:
if (ret)
drm_mode_object_unregister(dev, &connector->base);
-out_unlock:
- drm_modeset_unlock_all(dev);
-
return ret;
}
EXPORT_SYMBOL(drm_connector_init);
@@ -352,8 +349,10 @@ void drm_connector_cleanup(struct drm_connector *connector)
drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
+ spin_lock_irq(&dev->mode_config.connector_list_lock);
list_del(&connector->head);
dev->mode_config.num_connector--;
+ spin_unlock_irq(&dev->mode_config.connector_list_lock);
WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
if (connector->state && connector->funcs->atomic_destroy_state)
@@ -444,30 +443,30 @@ EXPORT_SYMBOL(drm_connector_unregister);
void drm_connector_unregister_all(struct drm_device *dev)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
- /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
drm_connector_unregister(connector);
+ drm_connector_list_iter_put(&conn_iter);
}
int drm_connector_register_all(struct drm_device *dev)
{
struct drm_connector *connector;
- int ret;
+ struct drm_connector_list_iter conn_iter;
+ int ret = 0;
- /* FIXME: taking the mode config mutex ends up in a clash with
- * fbcon/backlight registration */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
ret = drm_connector_register(connector);
if (ret)
- goto err;
+ break;
}
+ drm_connector_list_iter_put(&conn_iter);
- return 0;
-
-err:
- mutex_unlock(&dev->mode_config.mutex);
- drm_connector_unregister_all(dev);
+ if (ret)
+ drm_connector_unregister_all(dev);
return ret;
}
@@ -489,6 +488,87 @@ const char *drm_get_connector_status_name(enum drm_connector_status status)
}
EXPORT_SYMBOL(drm_get_connector_status_name);
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map connector_list_iter_dep_map = {
+ .name = "drm_connector_list_iter"
+};
+#endif
+
+/**
+ * drm_connector_list_iter_get - initialize a connector_list iterator
+ * @dev: DRM device
+ * @iter: connector_list iterator
+ *
+ * Sets @iter up to walk the &drm_mode_config.connector_list of @dev. @iter
+ * must always be cleaned up again by calling drm_connector_list_iter_put().
+ * Iteration itself happens using drm_connector_list_iter_next() or
+ * drm_for_each_connector_iter().
+ */
+void drm_connector_list_iter_get(struct drm_device *dev,
+ struct drm_connector_list_iter *iter)
+{
+ iter->dev = dev;
+ iter->conn = NULL;
+ lock_acquire_shared_recursive(&connector_list_iter_dep_map, 0, 1, NULL, _RET_IP_);
+}
+EXPORT_SYMBOL(drm_connector_list_iter_get);
+
+/**
+ * drm_connector_list_iter_next - return next connector
+ * @iter: connector_list iterator
+ *
+ * Returns the next connector for @iter, or NULL when the list walk has
+ * completed.
+ */
+struct drm_connector *
+drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
+{
+ struct drm_connector *old_conn = iter->conn;
+ struct drm_mode_config *config = &iter->dev->mode_config;
+ struct list_head *lhead;
+ unsigned long flags;
+
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ lhead = old_conn ? &old_conn->head : &config->connector_list;
+
+ do {
+ if (lhead->next == &config->connector_list) {
+ iter->conn = NULL;
+ break;
+ }
+
+ lhead = lhead->next;
+ iter->conn = list_entry(lhead, struct drm_connector, head);
+
+ /* loop until it's not a zombie connector */
+ } while (!kref_get_unless_zero(&iter->conn->base.refcount));
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+
+ if (old_conn)
+ drm_connector_unreference(old_conn);
+
+ return iter->conn;
+}
+EXPORT_SYMBOL(drm_connector_list_iter_next);
+
+/**
+ * drm_connector_list_iter_put - tear down a connector_list iterator
+ * @iter: connector_list iterator
+ *
+ * Tears down @iter and releases any resources (like &drm_connector references)
+ * acquired while walking the list. This must always be called, whether the
+ * iteration completed fully or was aborted without walking the entire
+ * list.
+ */
+void drm_connector_list_iter_put(struct drm_connector_list_iter *iter)
+{
+ iter->dev = NULL;
+ if (iter->conn)
+ drm_connector_unreference(iter->conn);
+ lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(drm_connector_list_iter_put);
+
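Taken together, the three functions above form a reference-safe walk over the
connector list; a minimal usage sketch, assuming the driver already holds a
struct drm_device *dev (the count variable is illustrative only):

	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	unsigned int count = 0;

	drm_connector_list_iter_get(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		count++;	/* connector holds a reference here, safe against hotplug */
	drm_connector_list_iter_put(&conn_iter);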
static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
{ SubPixelUnknown, "Unknown" },
{ SubPixelHorizontalRGB, "Horizontal RGB" },
@@ -618,8 +698,8 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* drivers this is only provided for backwards compatibility with existing
* drivers, it remaps to controlling the "ACTIVE" property on the CRTC the
* connector is linked to. Drivers should never set this property directly,
- * it is handled by the DRM core by calling the ->dpms() callback in
- * &drm_connector_funcs. Atomic drivers should implement this hook using
+ * it is handled by the DRM core by calling the &drm_connector_funcs.dpms
+ * callback. Atomic drivers should implement this hook using
* drm_atomic_helper_connector_dpms(). This is the only standard
* connector property that userspace can change.
* PATH:
@@ -1085,43 +1165,65 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
- mutex_lock(&dev->mode_config.mutex);
-
connector = drm_connector_lookup(dev, out_resp->connector_id);
- if (!connector) {
- ret = -ENOENT;
- goto out_unlock;
- }
+ if (!connector)
+ return -ENOENT;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ encoder = drm_connector_get_encoder(connector);
+ if (encoder)
+ out_resp->encoder_id = encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+ (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+ &out_resp->count_props);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (ret)
+ goto out_unref;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
if (connector->encoder_ids[i] != 0)
encoders_count++;
+ if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ copied = 0;
+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ if (put_user(connector->encoder_ids[i],
+ encoder_ptr + copied)) {
+ ret = -EFAULT;
+ goto out_unref;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_encoders = encoders_count;
+
+ out_resp->connector_id = connector->base.id;
+ out_resp->connector_type = connector->connector_type;
+ out_resp->connector_type_id = connector->connector_type_id;
+
+ mutex_lock(&dev->mode_config.mutex);
if (out_resp->count_modes == 0) {
connector->funcs->fill_modes(connector,
dev->mode_config.max_width,
dev->mode_config.max_height);
}
- /* delayed so we get modes regardless of pre-fill_modes state */
- list_for_each_entry(mode, &connector->modes, head)
- if (drm_mode_expose_to_userspace(mode, file_priv))
- mode_count++;
-
- out_resp->connector_id = connector->base.id;
- out_resp->connector_type = connector->connector_type;
- out_resp->connector_type_id = connector->connector_type_id;
out_resp->mm_width = connector->display_info.width_mm;
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- encoder = drm_connector_get_encoder(connector);
- if (encoder)
- out_resp->encoder_id = encoder->base.id;
- else
- out_resp->encoder_id = 0;
+ /* delayed so we get modes regardless of pre-fill_modes state */
+ list_for_each_entry(mode, &connector->modes, head)
+ if (drm_mode_expose_to_userspace(mode, file_priv))
+ mode_count++;
/*
* This ioctl is called twice, once to determine how much space is
@@ -1144,36 +1246,10 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
}
}
out_resp->count_modes = mode_count;
-
- ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
- (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
- (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
- &out_resp->count_props);
- if (ret)
- goto out;
-
- if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
- copied = 0;
- encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
- encoder_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- out_resp->count_encoders = encoders_count;
-
out:
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
- drm_connector_unreference(connector);
-out_unlock:
mutex_unlock(&dev->mode_config.mutex);
+out_unref:
+ drm_connector_unreference(connector);
return ret;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e75f62cd8a65..6915f897bd8e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -47,6 +47,50 @@
#include "drm_internal.h"
/**
+ * DOC: overview
+ *
+ * A CRTC represents the overall display pipeline. It receives pixel data from
+ * &drm_plane and blends them together. The &drm_display_mode is also attached
+ * to the CRTC, specifying display timings. On the output side the data is fed
+ * to one or more &drm_encoder, which are then each connected to one
+ * &drm_connector.
+ *
+ * To create a CRTC, a KMS driver allocates and zeroes an instance of
+ * &struct drm_crtc (possibly as part of a larger structure) and registers it
+ * with a call to drm_crtc_init_with_planes().
+ *
+ * The CRTC is also the entry point for legacy modeset operations, see
+ * &drm_crtc_funcs.set_config, legacy plane operations, see
+ * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
+ * operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
+ * features are controlled through &drm_property and
+ * &drm_mode_config_funcs.atomic_check and &drm_mode_config_funcs.atomic_commit.
+ */
+
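A sketch of the allocate-and-register pattern described in the overview above;
the foo_* names and the primary/cursor plane pointers are hypothetical:

	struct foo_crtc {
		struct drm_crtc base;
		/* driver-private state */
	};

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	ret = drm_crtc_init_with_planes(dev, &foo->base, primary, cursor,
					&foo_crtc_funcs, NULL);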
+/**
+ * drm_crtc_from_index - find the registered CRTC at an index
+ * @dev: DRM device
+ * @idx: index of the registered CRTC to find
+ *
+ * Given a CRTC index, return the registered CRTC from the DRM device's
+ * list of CRTCs with the matching index. This is the inverse of drm_crtc_index().
+ * It's useful in the vblank callbacks (like &drm_driver.enable_vblank or
+ * &drm_driver.disable_vblank), since those still deal with indices instead
+ * of pointers to &struct drm_crtc.
+ */
+struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx)
+{
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, dev)
+ if (idx == crtc->index)
+ return crtc;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_crtc_from_index);
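A sketch of the intended use in a vblank hook; the foo_* name is hypothetical
and the hardware programming is elided:

	static int foo_enable_vblank(struct drm_device *dev, unsigned int pipe)
	{
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);

		if (WARN_ON(!crtc))
			return -EINVAL;

		/* enable the vblank interrupt for this crtc in hardware */
		return 0;
	}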
+
+/**
* drm_crtc_force_disable - Forcibly turn off a CRTC
* @crtc: CRTC to turn off
*
@@ -357,7 +401,10 @@ int drm_mode_getcrtc(struct drm_device *dev,
drm_modeset_lock_crtc(crtc, crtc->primary);
crtc_resp->gamma_size = crtc->gamma_size;
- if (crtc->primary->fb)
+
+ if (crtc->primary->state && crtc->primary->state->fb)
+ crtc_resp->fb_id = crtc->primary->state->fb->base.id;
+ else if (!crtc->primary->state && crtc->primary->fb)
crtc_resp->fb_id = crtc->primary->fb->base.id;
else
crtc_resp->fb_id = 0;
@@ -389,11 +436,12 @@ int drm_mode_getcrtc(struct drm_device *dev,
}
/**
- * drm_mode_set_config_internal - helper to call ->set_config
+ * drm_mode_set_config_internal - helper to call &drm_mode_config_funcs.set_config
* @set: modeset config to set
*
- * This is a little helper to wrap internal calls to the ->set_config driver
- * interface. The only thing it adds is correct refcounting dance.
+ * This is a little helper to wrap internal calls to the
+ * &drm_mode_config_funcs.set_config driver interface. The only thing it adds is
+ * correct refcounting dance.
*
* Returns:
* Zero on success, negative errno on failure.
@@ -434,27 +482,6 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
EXPORT_SYMBOL(drm_mode_set_config_internal);
/**
- * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
- * @mode: mode to query
- * @hdisplay: hdisplay value to fill in
- * @vdisplay: vdisplay value to fill in
- *
- * The vdisplay value will be doubled if the specified mode is a stereo mode of
- * the appropriate layout.
- */
-void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
- int *hdisplay, int *vdisplay)
-{
- struct drm_display_mode adjusted;
-
- drm_mode_copy(&adjusted, mode);
- drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
- *hdisplay = adjusted.crtc_hdisplay;
- *vdisplay = adjusted.crtc_vdisplay;
-}
-EXPORT_SYMBOL(drm_crtc_get_hv_timing);
-
-/**
* drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
* CRTC viewport
* @crtc: CRTC that framebuffer will be displayed on
@@ -471,7 +498,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
{
int hdisplay, vdisplay;
- drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+ drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->state &&
drm_rotation_90_or_270(crtc->primary->state->rotation))
@@ -572,11 +599,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
*/
if (!crtc->primary->format_default) {
ret = drm_plane_check_pixel_format(crtc->primary,
- fb->pixel_format);
+ fb->format->format);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(fb->pixel_format,
+ drm_get_format_name(fb->format->format,
&format_name));
goto out;
}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 5d2cb138eba6..44ba0e990d6c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -36,6 +36,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
@@ -52,9 +53,9 @@
* configuration on resume with drm_helper_resume_force_mode().
*
* Note that this helper library doesn't track the current power state of CRTCs
- * and encoders. It can call callbacks like ->dpms() even though the hardware is
- * already in the desired state. This deficiency has been fixed in the atomic
- * helpers.
+ * and encoders. It can call callbacks like &drm_encoder_helper_funcs.dpms even
+ * though the hardware is already in the desired state. This deficiency has been
+ * fixed in the atomic helpers.
*
* The driver callbacks are mostly compatible with the atomic modeset helpers,
* except for the handling of the primary plane: Atomic helpers require that the
@@ -70,7 +71,7 @@
*
* These legacy modeset helpers use the same function table structures as
* all other modesetting helpers. See the documentation for struct
- * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
+ * &drm_crtc_helper_funcs, &struct drm_encoder_helper_funcs and struct
* &drm_connector_helper_funcs.
*/
@@ -88,6 +89,7 @@
bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
struct drm_device *dev = encoder->dev;
/*
@@ -99,9 +101,15 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
}
- drm_for_each_connector(connector, dev)
- if (connector->encoder == encoder)
+
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->encoder == encoder) {
+ drm_connector_list_iter_put(&conn_iter);
return true;
+ }
+ }
+ drm_connector_list_iter_put(&conn_iter);
return false;
}
EXPORT_SYMBOL(drm_helper_encoder_in_use);
@@ -436,10 +444,13 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/* Decouple all encoders and their attached connectors from this crtc */
drm_for_each_encoder(encoder, dev) {
+ struct drm_connector_list_iter conn_iter;
+
if (encoder->crtc != crtc)
continue;
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder != encoder)
continue;
@@ -456,6 +467,7 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/* we keep a reference while the encoder is bound */
drm_connector_unreference(connector);
}
+ drm_connector_list_iter_put(&conn_iter);
}
__drm_helper_disable_unused_functions(dev);
@@ -465,12 +477,12 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
*
- * The drm_crtc_helper_set_config() helper function implements the set_config
- * callback of struct &drm_crtc_funcs for drivers using the legacy CRTC helpers.
+ * The drm_crtc_helper_set_config() helper function implements the
+ * &drm_crtc_funcs.set_config callback for drivers using the legacy CRTC
+ * helpers.
*
* It first tries to locate the best encoder for each connector by calling the
- * connector ->best_encoder() (struct &drm_connector_helper_funcs) helper
- * operation.
+ * connector's &drm_connector_helper_funcs.best_encoder helper operation.
*
* After locating the appropriate encoders, the helper function will call the
* mode_fixup encoder and CRTC helper operations to adjust the requested mode,
@@ -481,15 +493,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
*
* If the adjusted mode is identical to the current mode but changes to the
* frame buffer need to be applied, the drm_crtc_helper_set_config() function
- * will call the CRTC ->mode_set_base() (struct &drm_crtc_helper_funcs) helper
- * operation.
+ * will call the CRTC &drm_crtc_helper_funcs.mode_set_base helper operation.
*
* If the adjusted mode differs from the current mode, or if the
* ->mode_set_base() helper operation is not provided, the helper function
* performs a full mode set sequence by calling the ->prepare(), ->mode_set()
* and ->commit() CRTC and encoder helper operations, in that order.
* Alternatively it can also use the dpms and disable helper operations. For
- * details see struct &drm_crtc_helper_funcs and struct
+ * details see &struct drm_crtc_helper_funcs and struct
* &drm_encoder_helper_funcs.
*
* This function is deprecated. New drivers must implement atomic modeset
@@ -507,6 +518,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int count = 0, ro, fail = 0;
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
@@ -571,9 +583,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
count = 0;
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
save_connector_encoders[count++] = connector->encoder;
- }
+ drm_connector_list_iter_put(&conn_iter);
save_set.crtc = set->crtc;
save_set.mode = &set->crtc->mode;
@@ -588,8 +601,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (set->crtc->primary->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
mode_changed = true;
- } else if (set->fb->pixel_format !=
- set->crtc->primary->fb->pixel_format) {
+ } else if (set->fb->format != set->crtc->primary->fb->format) {
mode_changed = true;
} else
fb_changed = true;
@@ -616,7 +628,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* a) traverse passed in connector list and get encoders for them */
count = 0;
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
new_encoder = connector->encoder;
@@ -649,6 +662,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
connector->encoder = new_encoder;
}
}
+ drm_connector_list_iter_put(&conn_iter);
if (fail) {
ret = -EINVAL;
@@ -656,7 +670,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
count = 0;
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (!connector->encoder)
continue;
@@ -674,6 +689,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (new_crtc &&
!drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
ret = -EINVAL;
+ drm_connector_list_iter_put(&conn_iter);
goto fail;
}
if (new_crtc != connector->encoder->crtc) {
@@ -690,6 +706,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
connector->base.id, connector->name);
}
}
+ drm_connector_list_iter_put(&conn_iter);
/* mode_set_base is not a required function */
if (fb_changed && !crtc_funcs->mode_set_base)
@@ -744,9 +761,10 @@ fail:
}
count = 0;
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
connector->encoder = save_connector_encoders[count++];
- }
+ drm_connector_list_iter_put(&conn_iter);
/* after fail drop reference on all unbound connectors in set, let
* bound connectors keep their reference
@@ -773,12 +791,16 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
struct drm_device *dev = encoder->dev;
- drm_for_each_connector(connector, dev)
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
if (connector->encoder == encoder)
if (connector->dpms < dpms)
dpms = connector->dpms;
+ drm_connector_list_iter_put(&conn_iter);
+
return dpms;
}
@@ -810,12 +832,16 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
int dpms = DRM_MODE_DPMS_OFF;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
struct drm_device *dev = crtc->dev;
- drm_for_each_connector(connector, dev)
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
if (connector->encoder && connector->encoder->crtc == crtc)
if (connector->dpms < dpms)
dpms = connector->dpms;
+ drm_connector_list_iter_put(&conn_iter);
+
return dpms;
}
@@ -824,14 +850,15 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
* @connector: affected connector
* @mode: DPMS mode
*
- * The drm_helper_connector_dpms() helper function implements the ->dpms()
- * callback of struct &drm_connector_funcs for drivers using the legacy CRTC helpers.
+ * The drm_helper_connector_dpms() helper function implements the
+ * &drm_connector_funcs.dpms callback for drivers using the legacy CRTC
+ * helpers.
*
* This is the main helper function provided by the CRTC helper framework for
* implementing the DPMS connector attribute. It computes the new desired DPMS
- * state for all encoders and CRTCs in the output mesh and calls the ->dpms()
- * callbacks provided by the driver in struct &drm_crtc_helper_funcs and struct
- * &drm_encoder_helper_funcs appropriately.
+ * state for all encoders and CRTCs in the output mesh and calls the
+ * &drm_crtc_helper_funcs.dpms and &drm_encoder_helper_funcs.dpms callbacks
+ * provided by the driver.
*
* This function is deprecated. New drivers must implement atomic modeset
* support, for which this function is unsuitable. Instead drivers should use
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index cdf6860c9d22..955c5690bf64 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -174,6 +174,11 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
/* drm_atomic.c */
+#ifdef CONFIG_DEBUG_FS
+struct drm_minor;
+int drm_atomic_debugfs_init(struct drm_minor *minor);
+#endif
+
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
@@ -186,6 +191,9 @@ void drm_plane_unregister_all(struct drm_device *dev);
int drm_plane_check_pixel_format(const struct drm_plane *plane,
u32 format);
+/* drm_bridge.c */
+void drm_bridge_detach(struct drm_bridge *bridge);
+
/* IOCTL */
int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -199,3 +207,6 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+
+/* drm_edid.c */
+void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 2e3e46a53805..2290a74a6e46 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_atomic.h>
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
#if defined(CONFIG_DEBUG_FS)
@@ -80,7 +81,8 @@ static const struct file_operations drm_debugfs_fops = {
* \return Zero on success, non-zero on failure
*
* Create a given set of debugfs files represented by an array of
- * gdm_debugfs_lists in the given root directory.
+ * &drm_info_list in the given root directory. These files will be removed
+ * automatically on drm_debugfs_cleanup().
*/
int drm_debugfs_create_files(const struct drm_info_list *files, int count,
struct dentry *root, struct drm_minor *minor)
@@ -217,6 +219,19 @@ int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
+static void drm_debugfs_remove_all_files(struct drm_minor *minor)
+{
+ struct drm_info_node *node, *tmp;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_for_each_entry_safe(node, tmp, &minor->debugfs_list, list) {
+ debugfs_remove(node->dent);
+ list_del(&node->list);
+ kfree(node);
+ }
+ mutex_unlock(&minor->debugfs_lock);
+}
+
/**
* Cleanup the debugfs filesystem resources.
*
@@ -228,7 +243,6 @@ EXPORT_SYMBOL(drm_debugfs_remove_files);
int drm_debugfs_cleanup(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
- int ret;
if (!minor->debugfs_root)
return 0;
@@ -236,17 +250,9 @@ int drm_debugfs_cleanup(struct drm_minor *minor)
if (dev->driver->debugfs_cleanup)
dev->driver->debugfs_cleanup(minor);
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
- ret = drm_atomic_debugfs_cleanup(minor);
- if (ret) {
- DRM_ERROR("DRM: Failed to remove atomic debugfs entries\n");
- return ret;
- }
- }
-
- drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
+ drm_debugfs_remove_all_files(minor);
- debugfs_remove(minor->debugfs_root);
+ debugfs_remove_recursive(minor->debugfs_root);
minor->debugfs_root = NULL;
return 0;
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 00e771fb7df2..96891c4a6e23 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -125,6 +125,12 @@ static const struct file_operations drm_crtc_crc_control_fops = {
.write = crc_control_write
};
+static int crtc_crc_data_count(struct drm_crtc_crc *crc)
+{
+ assert_spin_locked(&crc->lock);
+ return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
+}
+
static int crtc_crc_open(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = inode->i_private;
@@ -160,8 +166,19 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
crc->entries = entries;
crc->values_cnt = values_cnt;
crc->opened = true;
+
+ /*
+ * Only return once we've got a first frame, so userspace doesn't have to
+ * guess when this particular piece of HW will be ready to start
+ * generating CRCs.
+ */
+ ret = wait_event_interruptible_lock_irq(crc->wq,
+ crtc_crc_data_count(crc),
+ crc->lock);
spin_unlock_irq(&crc->lock);
+ WARN_ON(ret);
+
return 0;
err_disable:
@@ -189,12 +206,6 @@ static int crtc_crc_release(struct inode *inode, struct file *filep)
return 0;
}
-static int crtc_crc_data_count(struct drm_crtc_crc *crc)
-{
- assert_spin_locked(&crc->lock);
- return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
-}
-
/*
* 1 frame field of 10 chars plus a number of CRC fields of 10 chars each, space
* separated, with a newline at the end and null-terminated.
@@ -325,16 +336,19 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
struct drm_crtc_crc_entry *entry;
int head, tail;
- assert_spin_locked(&crc->lock);
+ spin_lock(&crc->lock);
/* Caller may not have noticed yet that userspace has stopped reading */
- if (!crc->opened)
+ if (!crc->opened) {
+ spin_unlock(&crc->lock);
return -EINVAL;
+ }
head = crc->head;
tail = crc->tail;
if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+ spin_unlock(&crc->lock);
DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
return -ENOBUFS;
}
@@ -347,6 +361,10 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
crc->head = head;
+ spin_unlock(&crc->lock);
+
+ wake_up_interruptible(&crc->wq);
+
return 0;
}
EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
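With this change drm_crtc_add_crc_entry() takes &drm_crtc_crc.lock itself and
wakes any waiting reader, so a driver's CRC interrupt handler can feed entries
directly; a sketch, where hw_read_crc() stands in for a hypothetical register
read and frame for the driver's frame counter:

	u32 crc = hw_read_crc(foo);	/* hypothetical hardware access */

	drm_crtc_add_crc_entry(crtc, true, frame, &crc);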
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 3e6fe82c6d64..68908c1d5ca1 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -725,7 +725,7 @@ MODULE_PARM_DESC(dp_aux_i2c_speed_khz,
/*
* Transfer a single I2C-over-AUX message and handle various error conditions,
* retrying the transaction as appropriate. It is assumed that the
- * aux->transfer function does not modify anything in the msg other than the
+ * &drm_dp_aux.transfer function does not modify anything in the msg other than the
* reply field.
*
* Returns bytes transferred on success, or a negative error code on failure.
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index f59771da52ee..f2cc375907d0 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1086,7 +1086,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
}
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
- struct device *dev,
+ struct drm_device *dev,
struct drm_dp_link_addr_reply_port *port_msg)
{
struct drm_dp_mst_port *port;
@@ -1104,7 +1104,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->port_num = port_msg->port_number;
port->mgr = mstb->mgr;
port->aux.name = "DPMST";
- port->aux.dev = dev;
+ port->aux.dev = dev->dev;
created = true;
} else {
old_pdt = port->pdt;
@@ -2949,7 +2949,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
* Return 0 for success, or negative error code on failure
*/
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
- struct device *dev, struct drm_dp_aux *aux,
+ struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes,
int max_payloads, int conn_base_id)
{
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6594b4088f11..b5c6bb46a425 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -221,7 +221,7 @@ static int drm_minor_register(struct drm_device *dev, unsigned int type)
ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- return ret;
+ goto err_debugfs;
}
ret = device_add(minor->kdev);
@@ -298,7 +298,7 @@ void drm_minor_release(struct drm_minor *minor)
/**
* DOC: driver instance overview
*
- * A device instance for a drm driver is represented by struct &drm_device. This
+ * A device instance for a drm driver is represented by &struct drm_device. This
* is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
* callbacks implemented by the driver. The driver then needs to initialize all
* the various subsystems for the drm device like memory management, vblank
@@ -309,7 +309,7 @@ void drm_minor_release(struct drm_minor *minor)
* userspace the device instance can be published using drm_dev_register().
*
* There is also deprecated support for initializing device instances using
- * bus-specific helpers and the ->load() callback. But due to
+ * bus-specific helpers and the &drm_driver.load callback. But due to
* backwards-compatibility needs the device instance has to be published too
* early, which requires unpretty global locking to make safe and is therefore
* only supported for existing drivers not yet converted to the new scheme.
@@ -323,9 +323,8 @@ void drm_minor_release(struct drm_minor *minor)
* historical baggage. Hence use the reference counting provided by
* drm_dev_ref() and drm_dev_unref() only carefully.
*
- * Also note that embedding of &drm_device is currently not (yet) supported (but
- * it would be easy to add). Drivers can store driver-private data in the
- * dev_priv field of &drm_device.
+ * It is recommended that drivers embed &struct drm_device into their own device
+ * structure, which is supported through drm_dev_init().
*/
/**
@@ -462,7 +461,14 @@ static void drm_fs_inode_free(struct inode *inode)
* Note that for purely virtual devices @parent can be NULL.
*
* Drivers that do not want to allocate their own device struct
- * embedding struct &drm_device can call drm_dev_alloc() instead.
+ * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
+ * that do embed &struct drm_device it must be placed first in the overall
+ * structure, and the overall structure must be allocated using kmalloc(): The
+ * drm core's release function unconditionally calls kfree() on the @dev pointer
+ * when the final reference is released. To override this behaviour, and so
+ * allow embedding of the drm_device inside the driver's device struct at an
+ * arbitrary offset, you must supply a &drm_driver.release callback and control
+ * the finalization explicitly.
*
* RETURNS:
* 0 on success, or error code on failure.
@@ -550,6 +556,41 @@ err_free:
EXPORT_SYMBOL(drm_dev_init);
/**
+ * drm_dev_fini - Finalize a dead DRM device
+ * @dev: DRM device
+ *
+ * Finalize a dead DRM device. This is the converse to drm_dev_init() and
+ * frees up all data allocated by it. All driver private data should be
+ * finalized first. Note that this function does not free the @dev; that is
+ * left to the caller.
+ *
+ * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
+ * from a &drm_driver.release callback.
+ */
+void drm_dev_fini(struct drm_device *dev)
+{
+ drm_vblank_cleanup(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_destroy(dev);
+
+ drm_legacy_ctxbitmap_cleanup(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_fs_inode_free(dev->anon_inode);
+
+ drm_minor_free(dev, DRM_MINOR_PRIMARY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+
+ mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->ctxlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
+ kfree(dev->unique);
+}
+EXPORT_SYMBOL(drm_dev_fini);
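A sketch of the embedding pattern drm_dev_fini() enables, with foo_* names
hypothetical; the &drm_driver.release hook finalizes the DRM device and then
frees the containing structure:

	struct foo_device {
		struct drm_device drm;
		/* driver-private state */
	};

	static void foo_release(struct drm_device *dev)
	{
		struct foo_device *foo = container_of(dev, struct foo_device, drm);

		drm_dev_fini(dev);
		kfree(foo);
	}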
+
+/**
* drm_dev_alloc - Allocate new DRM device
* @driver: DRM driver to allocate device for
* @parent: Parent device object
@@ -565,7 +606,7 @@ EXPORT_SYMBOL(drm_dev_init);
*
* Note that for purely virtual devices @parent can be NULL.
*
- * Drivers that wish to subclass or embed struct &drm_device into their
+ * Drivers that wish to subclass or embed &struct drm_device into their
* own struct should look at using drm_dev_init() instead.
*
* RETURNS:
@@ -595,23 +636,12 @@ static void drm_dev_release(struct kref *ref)
{
struct drm_device *dev = container_of(ref, struct drm_device, ref);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_destroy(dev);
-
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
- drm_fs_inode_free(dev->anon_inode);
-
- drm_minor_free(dev, DRM_MINOR_PRIMARY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_minor_free(dev, DRM_MINOR_CONTROL);
-
- mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->ctxlist_mutex);
- mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
- kfree(dev->unique);
- kfree(dev);
+ if (dev->driver->release) {
+ dev->driver->release(dev);
+ } else {
+ drm_dev_fini(dev);
+ kfree(dev);
+ }
}
/**
@@ -715,9 +745,9 @@ static void remove_compat_control_link(struct drm_device *dev)
* Never call this twice on any device!
*
* NOTE: To ensure backward compatibility with existing drivers method this
- * function calls the ->load() method after registering the device nodes,
- * creating race conditions. Usage of the ->load() methods is therefore
- * deprecated, drivers must perform all initialization before calling
+ * function calls the &drm_driver.load method after registering the device
+ * nodes, creating race conditions. Usage of the &drm_driver.load methods is
+ * therefore deprecated, drivers must perform all initialization before calling
* drm_dev_register().
*
* RETURNS:
@@ -725,6 +755,7 @@ static void remove_compat_control_link(struct drm_device *dev)
*/
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
+ struct drm_driver *driver = dev->driver;
int ret;
mutex_lock(&drm_global_mutex);
@@ -757,6 +788,13 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
drm_modeset_register_all(dev);
ret = 0;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver->name, driver->major, driver->minor,
+ driver->patchlevel, driver->date,
+ dev->dev ? dev_name(dev->dev) : "virtual device",
+ dev->primary->index);
+
goto out_unlock;
err_minors:
@@ -798,8 +836,6 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->agp)
drm_pci_agp_destroy(dev);
- drm_vblank_cleanup(dev);
-
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_legacy_rmmap(dev, r_list->map);
@@ -925,7 +961,7 @@ static int __init drm_core_init(void)
if (ret < 0)
goto error;
- DRM_INFO("Initialized\n");
+ DRM_DEBUG("Initialized\n");
return 0;
error:
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 8ac5a1c1d811..10307cc16d75 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -42,8 +42,8 @@
* create dumb buffers suitable for scanout, which can then be used to create
* KMS frame buffers.
*
- * To support dumb objects drivers must implement the dumb_create,
- * dumb_destroy and dumb_map_offset operations from struct &drm_driver. See
+ * To support dumb objects drivers must implement the &drm_driver.dumb_create,
+ * &drm_driver.dumb_destroy and &drm_driver.dumb_map_offset operations. See
* there for further details.
*
* Note that dumb objects may not be used for gpu acceleration, as has been
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 336be31ff3de..c8baab9bee0d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -35,8 +35,11 @@
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_displayid.h>
+#include "drm_crtc_internal.h"
+
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
((edid)->version == (maj) && (edid)->revision > (min)))
@@ -90,7 +93,7 @@ struct detailed_mode_closure {
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
-static struct edid_quirk {
+static const struct edid_quirk {
char vendor[4];
int product_id;
u32 quirks;
@@ -1477,7 +1480,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
*
* Returns true if @vendor is in @edid, false otherwise
*/
-static bool edid_vendor(struct edid *edid, char *vendor)
+static bool edid_vendor(struct edid *edid, const char *vendor)
{
char edid_vendor[3];
@@ -1497,7 +1500,7 @@ static bool edid_vendor(struct edid *edid, char *vendor)
*/
static u32 edid_get_quirks(struct edid *edid)
{
- struct edid_quirk *quirk;
+ const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
@@ -2152,7 +2155,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
/* fix up 1366x768 mode from 1368x768;
* GTF/CVT can't express 1366 width which isn't divisible by 8
*/
-static void fixup_mode_1366x768(struct drm_display_mode *mode)
+void drm_mode_fixup_1366x768(struct drm_display_mode *mode)
{
if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
mode->hdisplay = 1366;
@@ -2176,7 +2179,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
if (!newmode)
return modes;
- fixup_mode_1366x768(newmode);
+ drm_mode_fixup_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
@@ -2205,7 +2208,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
if (!newmode)
return modes;
- fixup_mode_1366x768(newmode);
+ drm_mode_fixup_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
@@ -3767,6 +3770,25 @@ bool drm_rgb_quant_range_selectable(struct edid *edid)
}
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+/**
+ * drm_default_rgb_quant_range - default RGB quantization range
+ * @mode: display mode
+ *
+ * Determine the default RGB quantization range for the mode,
+ * as specified in CEA-861.
+ *
+ * Return: The default RGB quantization range for the mode
+ */
+enum hdmi_quantization_range
+drm_default_rgb_quant_range(const struct drm_display_mode *mode)
+{
+ /* All CEA modes other than VIC 1 use limited quantization range. */
+ return drm_match_cea_mode(mode) > 1 ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL;
+}
+EXPORT_SYMBOL(drm_default_rgb_quant_range);
+
static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
const u8 *hdmi)
{
@@ -4272,6 +4294,52 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+/**
+ * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
+ * quantization range information
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
+ * @rgb_quant_range: RGB quantization range (Q)
+ * @rgb_quant_range_selectable: Sink supports selectable RGB quantization range (QS)
+ */
+void
+drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
+ const struct drm_display_mode *mode,
+ enum hdmi_quantization_range rgb_quant_range,
+ bool rgb_quant_range_selectable)
+{
+ /*
+ * CEA-861:
+ * "A Source shall not send a non-zero Q value that does not correspond
+ * to the default RGB Quantization Range for the transmitted Picture
+ * unless the Sink indicates support for the Q bit in a Video
+ * Capabilities Data Block."
+ *
+ * HDMI 2.0 recommends sending non-zero Q when it does match the
+ * default RGB quantization range for the mode, even when QS=0.
+ */
+ if (rgb_quant_range_selectable ||
+ rgb_quant_range == drm_default_rgb_quant_range(mode))
+ frame->quantization_range = rgb_quant_range;
+ else
+ frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+
+ /*
+ * CEA-861-F:
+ * "When transmitting any RGB colorimetry, the Source should set the
+ * YQ-field to match the RGB Quantization Range being transmitted
+ * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
+ * set YQ=1) and the Sink shall ignore the YQ-field."
+ */
+ if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+ frame->ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ else
+ frame->ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_FULL;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
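A sketch of how an HDMI encoder might combine the two helpers above when
building its AVI infoframe; sink_selectable is a hypothetical flag, e.g.
cached from drm_rgb_quant_range_selectable():

	struct hdmi_avi_infoframe frame;

	drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	drm_hdmi_avi_infoframe_quant_range(&frame, mode,
					   drm_default_rgb_quant_range(mode),
					   sink_selectable);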
+
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 992879f15f23..129450713bb7 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -30,8 +30,8 @@
* DOC: overview
*
* Encoders represent the connecting element between the CRTC (as the overall
- * pixel pipeline, represented by struct &drm_crtc) and the connectors (as the
- * generic sink entity, represented by struct &drm_connector). An encoder takes
+ * pixel pipeline, represented by &struct drm_crtc) and the connectors (as the
+ * generic sink entity, represented by &struct drm_connector). An encoder takes
* pixel data from a CRTC and converts it to a format suitable for any attached
* connector. Encoders are objects exposed to userspace, originally to allow
* userspace to infer cloning and connector/CRTC restrictions. Unfortunately
@@ -98,7 +98,7 @@ void drm_encoder_unregister_all(struct drm_device *dev)
*
* Initialises a preallocated encoder. Encoder should be subclassed as part of
* driver encoder objects. At driver unload time drm_encoder_cleanup() should be
- * called from the driver's destroy hook in &drm_encoder_funcs.
+ * called from the driver's &drm_encoder_funcs.destroy hook.
*
* Returns:
* Zero on success, error code on failure.
@@ -159,6 +159,17 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
* the indices on the drm_encoder after us in the encoder_list.
*/
+ if (encoder->bridge) {
+ struct drm_bridge *bridge = encoder->bridge;
+ struct drm_bridge *next;
+
+ while (bridge) {
+ next = bridge->next;
+ drm_bridge_detach(bridge);
+ bridge = next;
+ }
+ }
+
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
list_del(&encoder->head);
@@ -173,10 +184,12 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
bool uses_atomic = false;
+ struct drm_connector_list_iter conn_iter;
/* For atomic drivers only state objects are synchronously updated and
* protected by modeset locks, so check those first. */
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (!connector->state)
continue;
@@ -185,8 +198,10 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
if (connector->state->best_encoder != encoder)
continue;
+ drm_connector_list_iter_put(&conn_iter);
return connector->state->crtc;
}
+ drm_connector_list_iter_put(&conn_iter);
/* Don't return stale data (e.g. pending async disable). */
if (uses_atomic)
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 4484785cd9ac..cf804389f5ec 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -43,7 +43,7 @@
* &drm_encoder_slave. The @slave_funcs field will be initialized with
* the hooks provided by the slave driver.
*
- * If @info->platform_data is non-NULL it will be used as the initial
+ * If @info.platform_data is non-NULL it will be used as the initial
* slave config.
*
* Returns 0 on success or a negative errno on failure, in particular,
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 81b3558302b5..596fabf18c3e 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -39,6 +39,7 @@ struct drm_fb_cma {
struct drm_fbdev_cma {
struct drm_fb_helper fb_helper;
struct drm_fb_cma *fb;
+ const struct drm_framebuffer_funcs *fb_funcs;
};
/**
@@ -47,50 +48,40 @@ struct drm_fbdev_cma {
* Provides helper functions for creating a cma (contiguous memory allocator)
* backed framebuffer.
*
- * drm_fb_cma_create() is used in the &drm_mode_config_funcs ->fb_create
+ * drm_fb_cma_create() is used in the &drm_mode_config_funcs.fb_create
* callback function to create a cma backed framebuffer.
*
* An fbdev framebuffer backed by cma is also available by calling
* drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
- * If the &drm_framebuffer_funcs ->dirty callback is set, fb_deferred_io
- * will be set up automatically. dirty() is called by
- * drm_fb_helper_deferred_io() in process context (struct delayed_work).
+ * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
+ * set up automatically. &drm_framebuffer_funcs.dirty is called by
+ * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
*
* Example fbdev deferred io code::
*
- * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
- * struct drm_file *file_priv,
- * unsigned flags, unsigned color,
- * struct drm_clip_rect *clips,
- * unsigned num_clips)
+ * static int driver_fb_dirty(struct drm_framebuffer *fb,
+ * struct drm_file *file_priv,
+ * unsigned flags, unsigned color,
+ * struct drm_clip_rect *clips,
+ * unsigned num_clips)
* {
* struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
* ... push changes ...
* return 0;
* }
*
- * static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
+ * static struct drm_framebuffer_funcs driver_fb_funcs = {
* .destroy = drm_fb_cma_destroy,
* .create_handle = drm_fb_cma_create_handle,
- * .dirty = driver_fbdev_fb_dirty,
+ * .dirty = driver_fb_dirty,
* };
*
- * static int driver_fbdev_create(struct drm_fb_helper *helper,
- * struct drm_fb_helper_surface_size *sizes)
- * {
- * return drm_fbdev_cma_create_with_funcs(helper, sizes,
- * &driver_fbdev_fb_funcs);
- * }
- *
- * static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
- * .fb_probe = driver_fbdev_create,
- * };
+ * Initialize::
*
- * Initialize:
* fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
* dev->mode_config.num_crtc,
* dev->mode_config.num_connector,
- * &driver_fb_helper_funcs);
+ * &driver_fb_funcs);
*
*/
@@ -147,7 +138,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
if (!fb_cma)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &fb_cma->fb, mode_cmd);
for (i = 0; i < num_planes; i++)
fb_cma->obj[i] = obj[i];
@@ -164,16 +155,16 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
/**
* drm_fb_cma_create_with_funcs() - helper function for the
- * &drm_mode_config_funcs ->fb_create
- * callback function
+ * &drm_mode_config_funcs.fb_create
+ * callback
* @dev: DRM device
* @file_priv: drm file for the ioctl call
* @mode_cmd: metadata from the userspace fb creation request
* @funcs: vtable to be used for the new framebuffer object
*
* This can be used to set &drm_framebuffer_funcs for drivers that need the
- * dirty() callback. Use drm_fb_cma_create() if you don't need to change
- * &drm_framebuffer_funcs.
+ * &drm_framebuffer_funcs.dirty callback. Use drm_fb_cma_create() if you don't
+ * need to change &drm_framebuffer_funcs.
*/
struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
@@ -230,14 +221,14 @@ err_gem_object_unreference:
EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
/**
- * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
+ * drm_fb_cma_create() - &drm_mode_config_funcs.fb_create callback function
* @dev: DRM device
* @file_priv: drm file for the ioctl call
* @mode_cmd: metadata from the userspace fb creation request
*
* If your hardware has special alignment or pitch requirements these should be
* checked before calling this function. Use drm_fb_cma_create_with_funcs() if
- * you need to set &drm_framebuffer_funcs ->dirty.
+ * you need to set &drm_framebuffer_funcs.dirty.
*/
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -273,7 +264,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
* @plane: Which plane
* @state: Plane state attach fence to
*
- * This should be put into prepare_fb hook of struct &drm_plane_helper_funcs .
+ * This should be set as the &struct drm_plane_helper_funcs.prepare_fb hook.
*
* This function checks if the plane FB has a dma-buf attached, extracts
* the exclusive fence and attaches it to plane state for the atomic helper
@@ -304,15 +295,12 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
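Since drm_fb_cma_prepare_fb() matches the &drm_plane_helper_funcs.prepare_fb
signature, it can be plugged in directly; a sketch with a hypothetical
foo_plane_atomic_update:

	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
		.prepare_fb = drm_fb_cma_prepare_fb,
		.atomic_update = foo_plane_atomic_update,
	};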
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
- const struct drm_format_info *info;
int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
- (char *)&fb->pixel_format);
-
- info = drm_format_info(fb->pixel_format);
+ (char *)&fb->format->format);
- for (i = 0; i < info->num_planes; i++) {
+ for (i = 0; i < fb->format->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
drm_gem_cma_describe(fb_cma->obj[i], m);
@@ -411,13 +399,9 @@ static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
kfree(fbi->fbops);
}
-/*
- * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
- * needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
- */
-int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes,
- const struct drm_framebuffer_funcs *funcs)
+static int
+drm_fbdev_cma_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
@@ -453,7 +437,8 @@ int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
goto err_gem_free_object;
}
- fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
+ fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1,
+ fbdev_cma->fb_funcs);
if (IS_ERR(fbdev_cma->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(fbdev_cma->fb);
@@ -467,7 +452,7 @@ int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &drm_fbdev_cma_ops;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
offset = fbi->var.xoffset * bytes_per_pixel;
@@ -479,7 +464,7 @@ int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
fbi->screen_size = size;
fbi->fix.smem_len = size;
- if (funcs->dirty) {
+ if (fbdev_cma->fb_funcs->dirty) {
ret = drm_fbdev_cma_defio_init(fbi, obj);
if (ret)
goto err_cma_destroy;
@@ -488,21 +473,13 @@ int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
return 0;
err_cma_destroy:
- drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
- drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+ drm_framebuffer_remove(&fbdev_cma->fb->fb);
err_fb_info_destroy:
drm_fb_helper_release_fbi(helper);
err_gem_free_object:
drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
-EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
-
-static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
-}
static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
.fb_probe = drm_fbdev_cma_create,
@@ -512,15 +489,14 @@ static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
* drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
- * @num_crtc: Number of CRTCs
* @max_conn_count: Maximum number of connectors
- * @funcs: fb helper functions, in particular fb_probe()
+ * @funcs: framebuffer functions, in particular a custom dirty() callback
*
* Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
*/
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int num_crtc,
- unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
+ unsigned int preferred_bpp, unsigned int max_conn_count,
+ const struct drm_framebuffer_funcs *funcs)
{
struct drm_fbdev_cma *fbdev_cma;
struct drm_fb_helper *helper;
@@ -531,12 +507,13 @@ struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
return ERR_PTR(-ENOMEM);
}
+ fbdev_cma->fb_funcs = funcs;
helper = &fbdev_cma->fb_helper;
- drm_fb_helper_prepare(dev, helper, funcs);
+ drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+ ret = drm_fb_helper_init(dev, helper, max_conn_count);
if (ret < 0) {
dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
goto err_free;
@@ -576,11 +553,11 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
* Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
*/
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int num_crtc,
- unsigned int max_conn_count)
+ unsigned int preferred_bpp, unsigned int max_conn_count)
{
- return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
- max_conn_count, &drm_fb_cma_helper_funcs);
+ return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
+ max_conn_count,
+ &drm_fb_cma_funcs);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
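With the num_crtc argument gone, a typical call from driver load code now
looks like this sketch (the bpp value is illustrative):

	fbdev_cma = drm_fbdev_cma_init(drm, 32,
				       drm->mode_config.num_connector);
	if (IS_ERR(fbdev_cma))
		return PTR_ERR(fbdev_cma);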
@@ -595,10 +572,8 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
- if (fbdev_cma->fb) {
- drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
- drm_fb_cma_destroy(&fbdev_cma->fb->fb);
- }
+ if (fbdev_cma->fb)
+ drm_framebuffer_remove(&fbdev_cma->fb->fb);
drm_fb_helper_fini(&fbdev_cma->fb_helper);
kfree(fbdev_cma);
@@ -609,7 +584,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
* drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
* @fbdev_cma: The drm_fbdev_cma struct, may be NULL
*
- * This function is usually called from the DRM drivers lastclose callback.
+ * This function is usually called from the &drm_driver.lastclose callback.
*/
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
@@ -622,7 +597,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
* drm_fbdev_cma_hotplug_event() - Poll for hotplug events
* @fbdev_cma: The drm_fbdev_cma struct, may be NULL
*
- * This function is usually called from the DRM drivers output_poll_changed
+ * This function is usually called from the &drm_mode_config.output_poll_changed
* callback.
*/
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
@@ -646,3 +621,21 @@ void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
}
EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
+
+/**
+ * drm_fbdev_cma_set_suspend_unlocked - wrapper around
+ * drm_fb_helper_set_suspend_unlocked
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * Calls drm_fb_helper_set_suspend_unlocked(), which is a wrapper around
+ * fb_set_suspend implemented by fbdev core.
+ */
+void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
+ int state)
+{
+ if (fbdev_cma)
+ drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
+ state);
+}
+EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e934b541feea..f6d4d9700734 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -66,11 +66,11 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
- * They should also notify the fb helper code from updates to the output
- * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * drm_fb_helper_restore_fbdev_mode_unlocked() from their &drm_driver.lastclose
+ * callback. They should also notify the fb helper code of updates to the
+ * output configuration by calling drm_fb_helper_hotplug_event(). For easier
* integration with the output polling code in drm_crtc_helper.c the modeset
- * code provides a ->output_poll_changed callback.
+ * code provides a &drm_mode_config_funcs.output_poll_changed callback.
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
@@ -79,7 +79,7 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
* helper must be called first to initialize the minimum required to make
* hotplug detection work. Drivers also need to make sure to properly set up
- * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init()
+ * the &drm_mode_config.funcs member. After calling drm_kms_helper_poll_init()
* it is safe to enable interrupts and start processing hotplug events. At the
* same time, drivers should initialize all modeset objects such as CRTCs,
* encoders and connectors. To finish up the fbdev helper initialization, the
@@ -88,9 +88,9 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* should call drm_fb_helper_single_add_all_connectors() followed by
* drm_fb_helper_initial_config().
*
- * If &drm_framebuffer_funcs ->dirty is set, the
+ * If &drm_framebuffer_funcs.dirty is set, the
* drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will
- * accumulate changes and schedule &drm_fb_helper ->dirty_work to run right
+ * accumulate changes and schedule &drm_fb_helper.dirty_work to run right
* away. This worker then calls the dirty() function ensuring that it will
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
@@ -120,20 +120,22 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
- int i, ret;
+ struct drm_connector_list_iter conn_iter;
+ int i, ret = 0;
if (!drm_fbdev_emulation)
return 0;
mutex_lock(&dev->mode_config.mutex);
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
ret = drm_fb_helper_add_one_connector(fb_helper, connector);
if (ret)
goto fail;
}
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
+ goto out;
+
fail:
drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_fb_helper_connector *fb_helper_connector =
@@ -145,6 +147,8 @@ fail:
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
+out:
+ drm_connector_list_iter_put(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
return ret;
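
The iterator pair introduced here is the general pattern for walking connectors safely against concurrent hotplug; condensed to its core (a sketch using the v4.11 get/put naming):

	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_get(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* connector is only guaranteed to stay valid inside
		 * the loop body */
	}
	drm_connector_list_iter_put(&conn_iter);
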
@@ -243,7 +247,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
}
/**
- * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter
+ * drm_fb_helper_debug_enter - implementation for &fb_ops.fb_debug_enter
* @info: fbdev registered by the helper
*/
int drm_fb_helper_debug_enter(struct fb_info *info)
@@ -292,7 +296,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
}
/**
- * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave
+ * drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave
* @info: fbdev registered by the helper
*/
int drm_fb_helper_debug_leave(struct fb_info *info)
@@ -401,7 +405,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_warn_on_modeset_not_all_locked(dev);
- if (dev->mode_config.funcs->atomic_commit)
+ if (drm_drv_uses_atomic_modeset(dev))
return restore_fbdev_mode_atomic(fb_helper);
drm_for_each_plane(plane, dev) {
@@ -441,7 +445,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: fbcon to restore
*
- * This should be called from driver's drm ->lastclose callback
+ * This should be called from the driver's &drm_driver.lastclose callback
* when implementing an fbcon on top of kms using this helper. This ensures that
* the user isn't greeted with a black screen when e.g. X dies.
*
@@ -581,7 +585,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
}
/**
- * drm_fb_helper_blank - implementation for ->fb_blank
+ * drm_fb_helper_blank - implementation for &fb_ops.fb_blank
* @blank: desired blanking state
* @info: fbdev registered by the helper
*/
@@ -708,7 +712,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* drm_fb_helper_init - initialize a drm_fb_helper structure
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
- * @crtc_count: maximum number of crtcs to support in this fbdev emulation
* @max_conn_count: max connector count
*
* This allocates the structures for the fbdev helper with the given limits.
@@ -723,9 +726,10 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
*/
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
- int crtc_count, int max_conn_count)
+ int max_conn_count)
{
struct drm_crtc *crtc;
+ struct drm_mode_config *config = &dev->mode_config;
int i;
if (!drm_fbdev_emulation)
@@ -734,11 +738,11 @@ int drm_fb_helper_init(struct drm_device *dev,
if (!max_conn_count)
return -EINVAL;
- fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ fb_helper->crtc_info = kcalloc(config->num_crtc, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
if (!fb_helper->crtc_info)
return -ENOMEM;
- fb_helper->crtc_count = crtc_count;
+ fb_helper->crtc_count = config->num_crtc;
fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
if (!fb_helper->connector_info) {
kfree(fb_helper->crtc_info);
@@ -747,7 +751,7 @@ int drm_fb_helper_init(struct drm_device *dev,
fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
fb_helper->connector_count = 0;
- for (i = 0; i < crtc_count; i++) {
+ for (i = 0; i < fb_helper->crtc_count; i++) {
fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
@@ -856,6 +860,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!drm_fbdev_emulation)
return;
+ cancel_work_sync(&fb_helper->resume_work);
+ cancel_work_sync(&fb_helper->dirty_work);
+
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
@@ -908,7 +915,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
* @info: fb_info struct pointer
* @pagelist: list of dirty mmap framebuffer pages
*
- * This function is used as the &fb_deferred_io ->deferred_io
+ * This function is used as the &fb_deferred_io.deferred_io
* callback function for flushing the fbdev mmap writes.
*/
void drm_fb_helper_deferred_io(struct fb_info *info,
@@ -1099,7 +1106,7 @@ EXPORT_SYMBOL(drm_fb_helper_set_suspend);
* due to all the printk activity.
*
* This function can be called multiple times with the same state since
- * &fb_info->state is checked to see if fbdev is running or not before locking.
+ * &fb_info.state is checked to see if fbdev is running or not before locking.
*
* Use drm_fb_helper_set_suspend() if you need to take the lock yourself.
*/
@@ -1169,7 +1176,7 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
!fb_helper->funcs->gamma_get))
return -EINVAL;
- WARN_ON(fb->bits_per_pixel != 8);
+ WARN_ON(fb->format->cpp[0] != 1);
fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
@@ -1177,7 +1184,7 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
}
/**
- * drm_fb_helper_setcmap - implementation for ->fb_setcmap
+ * drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap
* @cmap: cmap to set
* @info: fbdev registered by the helper
*/
@@ -1234,7 +1241,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
EXPORT_SYMBOL(drm_fb_helper_setcmap);
/**
- * drm_fb_helper_check_var - implementation for ->fb_check_var
+ * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
* @var: screeninfo to check
* @info: fbdev registered by the helper
*/
@@ -1252,14 +1259,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel != fb->bits_per_pixel ||
+ if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
var->xres != fb->width || var->yres != fb->height ||
var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
- fb->width, fb->height, fb->bits_per_pixel);
+ fb->width, fb->height, fb->format->cpp[0] * 8);
return -EINVAL;
}
@@ -1334,7 +1341,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
EXPORT_SYMBOL(drm_fb_helper_check_var);
/**
- * drm_fb_helper_set_par - implementation for ->fb_set_par
+ * drm_fb_helper_set_par - implementation for &fb_ops.fb_set_par
* @info: fbdev registered by the helper
*
* This will let fbcon do the mode init and is called at initialization time by
@@ -1418,7 +1425,7 @@ backoff:
}
/**
- * drm_fb_helper_pan_display - implementation for ->fb_pan_display
+ * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display
* @var: updated screen information
* @info: fbdev registered by the helper
*/
@@ -1440,7 +1447,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
return -EBUSY;
}
- if (dev->mode_config.funcs->atomic_commit) {
+ if (drm_drv_uses_atomic_modeset(dev)) {
ret = pan_display_atomic(var, info);
goto unlock;
}
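
Both call sites above swap an open-coded test of mode_config.funcs->atomic_commit for a named predicate; its shape is roughly the following (an approximation only — see include/drm/drm_drv.h for the authoritative definition):

	static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
	{
		return dev->mode_config.funcs->atomic_commit != NULL;
	}
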
@@ -1603,7 +1610,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
* additional constraints need to set up their own limits.
*
* Drivers should call this (or their equivalent setup code) from their
- * ->fb_probe callback.
+ * &drm_fb_helper_funcs.fb_probe callback.
*/
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
@@ -1632,11 +1639,11 @@ EXPORT_SYMBOL(drm_fb_helper_fill_fix);
* @fb_height: desired fb height
*
* Sets up the variable fbdev metainformation from the given fb helper instance
- * and the drm framebuffer allocated in fb_helper->fb.
+ * and the drm framebuffer allocated in &drm_fb_helper.fb.
*
* Drivers should call this (or their equivalent setup code) from their
- * ->fb_probe callback after having allocated the fbdev backing
- * storage framebuffer.
+ * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
+ * backing storage framebuffer.
*/
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
@@ -1645,7 +1652,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
- info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.bits_per_pixel = fb->format->cpp[0] * 8;
info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
@@ -1653,7 +1660,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.height = -1;
info->var.width = -1;
- switch (fb->depth) {
+ switch (fb->format->depth) {
case 8:
info->var.red.offset = 0;
info->var.green.offset = 0;
@@ -1748,8 +1755,7 @@ static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
return fb_connector->connector->cmdline_mode.specified;
}
-struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
- int width, int height)
+struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn)
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode;
@@ -1867,7 +1873,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
if (!enabled[i])
continue;
fb_helper_conn = fb_helper->connector_info[i];
- modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn);
if (!modes[i]) {
can_clone = false;
break;
@@ -1989,7 +1995,7 @@ retry:
fb_helper_conn->connector->base.id);
 /* go for command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn);
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
@@ -2056,7 +2062,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
* NULL we fallback to the default drm_atomic_helper_best_encoder()
* helper.
*/
- if (fb_helper->dev->mode_config.funcs->atomic_commit &&
+ if (drm_drv_uses_atomic_modeset(fb_helper->dev) &&
!connector_funcs->best_encoder)
encoder = drm_atomic_helper_best_encoder(connector);
else
@@ -2204,9 +2210,9 @@ out:
* Note that this also registers the fbdev and so allows userspace to call into
* the driver through the fbdev interfaces.
*
- * This function will call down into the ->fb_probe callback to let
- * the driver allocate and initialize the fbdev info structure and the drm
- * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * This function will call down into the &drm_fb_helper_funcs.fb_probe callback
+ * to let the driver allocate and initialize the fbdev info structure and the
+ * drm framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
* drm_fb_helper_fill_fix() are provided as helpers to setup simple default
* values for the fbdev info structure.
*
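
Tying the fill helpers together, a driver's &drm_fb_helper_funcs.fb_probe might look roughly like this — foo_alloc_backing_fb is a hypothetical stand-in for the driver's buffer allocation:

	static int foo_fb_probe(struct drm_fb_helper *helper,
				struct drm_fb_helper_surface_size *sizes)
	{
		struct drm_framebuffer *fb;
		struct fb_info *info;

		fb = foo_alloc_backing_fb(helper, sizes);	/* hypothetical */
		if (IS_ERR(fb))
			return PTR_ERR(fb);
		helper->fb = fb;

		info = drm_fb_helper_alloc_fbi(helper);
		if (IS_ERR(info))
			return PTR_ERR(info);

		drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
		drm_fb_helper_fill_var(info, helper, sizes->fb_width,
				       sizes->fb_height);
		return 0;
	}
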
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 5d96de40b63f..afdf5b147f39 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -580,7 +580,7 @@ EXPORT_SYMBOL(drm_poll);
* kmalloc and @p must be the first member element.
*
* This is the locked version of drm_event_reserve_init() for callers which
- * already hold dev->event_lock.
+ * already hold &drm_device.event_lock.
*
* RETURNS:
*
@@ -621,8 +621,8 @@ EXPORT_SYMBOL(drm_event_reserve_init_locked);
* If callers embedded @p into a larger structure it must be allocated with
* kmalloc and @p must be the first member element.
*
- * Callers which already hold dev->event_lock should use
- * drm_event_reserve_init() instead.
+ * Callers which already hold &drm_device.event_lock should use
+ * drm_event_reserve_init_locked() instead.
*
* RETURNS:
*
@@ -677,7 +677,7 @@ EXPORT_SYMBOL(drm_event_cancel_free);
*
* This function sends the event @e, initialized with drm_event_reserve_init(),
* to its associated userspace DRM file. Callers must already hold
- * dev->event_lock, see drm_send_event() for the unlocked version.
+ * &drm_device.event_lock, see drm_send_event() for the unlocked version.
*
* Note that the core will take care of unlinking and disarming events when the
* corresponding DRM file is closed. Drivers need not worry about whether the
@@ -689,8 +689,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
assert_spin_locked(&dev->event_lock);
if (e->completion) {
- /* ->completion might disappear as soon as it signalled. */
complete_all(e->completion);
+ e->completion_release(e->completion);
e->completion = NULL;
}
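
The locked/unlocked split documented in these hunks maps onto driver completion paths like this (sketch; e stands for a pending vblank event set up earlier with drm_event_reserve_init()):

	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_send_event_locked(dev, &e->base);	/* event_lock already held */
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* from a context that does not hold event_lock: */
	drm_send_event(dev, &e->base);
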
@@ -717,8 +717,9 @@ EXPORT_SYMBOL(drm_send_event_locked);
* @e: DRM event to deliver
*
* This function sends the event @e, initialized with drm_event_reserve_init(),
- * to its associated userspace DRM file. This function acquires dev->event_lock,
- * see drm_send_event_locked() for callers which already hold this lock.
+ * to its associated userspace DRM file. This function acquires
+ * &drm_device.event_lock, see drm_send_event_locked() for callers which already
+ * hold this lock.
*
* Note that the core will take care of unlinking and disarming events when the
* corresponding DRM file is closed. Drivers need not worry about whether the
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index cbf0c893f426..28a0108a1ab8 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -39,13 +39,13 @@
* Frame buffers rely on the underlying memory manager for allocating backing
* storage. When creating a frame buffer applications pass a memory handle
* (or a list of memory handles for multi-planar formats) through the
- * struct &drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace
+ * &struct drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace
* buffer management interface this would be a GEM handle. Drivers are however
* free to use their own backing storage object handles, e.g. vmwgfx directly
* exposes special TTM handles to userspace and so expects TTM handles in the
* create ioctl and not GEM handles.
*
- * Framebuffers are tracked with struct &drm_framebuffer. They are published
+ * Framebuffers are tracked with &struct drm_framebuffer. They are published
* using drm_framebuffer_init() - after calling that function userspace can use
* and access the framebuffer object. The helper function
* drm_helper_mode_fill_fb_struct() can be used to pre-fill the required
@@ -55,11 +55,11 @@
* drivers can grab additional references with drm_framebuffer_reference() and
* drop them again with drm_framebuffer_unreference(). For driver-private
* framebuffers for which the last reference is never dropped (e.g. for the
- * fbdev framebuffer when the struct struct &drm_framebuffer is embedded into
+ * fbdev framebuffer when the struct &struct drm_framebuffer is embedded into
* the fbdev helper struct) drivers can manually clean up a framebuffer at
* module unload time with drm_framebuffer_unregister_private(). But doing this
- * is not recommended, and it's better to have a normal free-standing struct
- * &drm_framebuffer.
+ * is not recommended, and it's better to have a normal free-standing &struct
+ * drm_framebuffer.
*/
int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
@@ -432,8 +432,8 @@ int drm_mode_getfb(struct drm_device *dev,
r->height = fb->height;
r->width = fb->width;
- r->depth = fb->depth;
- r->bpp = fb->bits_per_pixel;
+ r->depth = fb->format->depth;
+ r->bpp = fb->format->cpp[0] * 8;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) {
if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
@@ -470,7 +470,7 @@ int drm_mode_getfb(struct drm_device *dev,
* usb display-link, mipi manual update panels or edp panel self refresh modes.
*
* Modesetting drivers which always update the frontbuffer do not need to
- * implement the corresponding ->dirty framebuffer callback.
+ * implement the corresponding &drm_framebuffer_funcs.dirty callback.
*
* Called by the user via ioctl.
*
@@ -631,8 +631,11 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
{
int ret;
+ if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
+ return -EINVAL;
+
INIT_LIST_HEAD(&fb->filp_head);
- fb->dev = dev;
+
fb->funcs = funcs;
ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
@@ -706,10 +709,10 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
* @fb: framebuffer to remove
*
* Cleanup framebuffer. This function is intended to be used from the drivers
- * ->destroy callback. It can also be used to clean up driver private
- * framebuffers embedded into a larger structure.
+ * &drm_framebuffer_funcs.destroy callback. It can also be used to clean up
+ * driver private framebuffers embedded into a larger structure.
*
- * Note that this function does not remove the fb from active usuage - if it is
+ * Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
* the id and get back -EINVAL. Obviously no concern at driver unload time.
*
@@ -790,3 +793,47 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
drm_framebuffer_unreference(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);
+
+/**
+ * drm_framebuffer_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @fb: the framebuffer
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_framebuffer_plane_width(int width,
+ const struct drm_framebuffer *fb, int plane)
+{
+ if (plane >= fb->format->num_planes)
+ return 0;
+
+ if (plane == 0)
+ return width;
+
+ return width / fb->format->hsub;
+}
+EXPORT_SYMBOL(drm_framebuffer_plane_width);
+
+/**
+ * drm_framebuffer_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @fb: the framebuffer
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+int drm_framebuffer_plane_height(int height,
+ const struct drm_framebuffer *fb, int plane)
+{
+ if (plane >= fb->format->num_planes)
+ return 0;
+
+ if (plane == 0)
+ return height;
+
+ return height / fb->format->vsub;
+}
+EXPORT_SYMBOL(drm_framebuffer_plane_height);
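
For a 2x2-subsampled format such as NV12 the new helpers fold the hsub/vsub division in; a sketch:

	static void foo_chroma_size(const struct drm_framebuffer *fb,
				    int *cw, int *ch)
	{
		/* plane 1 of NV12 is the chroma plane: these return
		 * fb->width / 2 and fb->height / 2, while plane 0
		 * would be returned unchanged */
		*cw = drm_framebuffer_plane_width(fb->width, fb, 1);
		*ch = drm_framebuffer_plane_height(fb->height, fb, 1);
	}
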
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 465bacd0a630..bc93de308673 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -316,8 +316,8 @@ EXPORT_SYMBOL(drm_gem_handle_delete);
* @dev: corresponding drm_device
* @handle: the dumb handle to remove
*
- * This implements the ->dumb_destroy kms driver callback for drivers which use
- * gem to manage their backing storage.
+ * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
+ * which use gem to manage their backing storage.
*/
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
@@ -333,9 +333,9 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
* @obj: object to register
* @handlep: pointer to return the created handle to the caller
*
- * This expects the dev->object_name_lock to be held already and will drop it
- * before returning. Used to avoid races in establishing new handles when
- * importing an object from either an flink name or a dma-buf.
+ * This expects the &drm_device.object_name_lock to be held already and will
+ * drop it before returning. Used to avoid races in establishing new handles
+ * when importing an object from either an flink name or a dma-buf.
*
 * Handles must be released again through drm_gem_handle_delete(). This is done
* when userspace closes @file_priv for all attached handles, or through the
@@ -447,8 +447,8 @@ EXPORT_SYMBOL(drm_gem_free_mmap_offset);
* structures.
*
* This routine allocates and attaches a fake offset for @obj, in cases where
- * the virtual size differs from the physical size (ie. obj->size). Otherwise
- * just use drm_gem_create_mmap_offset().
+ * the virtual size differs from the physical size (ie. &drm_gem_object.size).
+ * Otherwise just use drm_gem_create_mmap_offset().
*
* This function is idempotent and handles an already allocated mmap offset
* transparently. Drivers do not need to check for this case.
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
* @kref: kref of the object to free
*
* Called after the last reference to the object has been lost.
- * Must be called holding &drm_device->struct_mutex.
+ * Must be called holding &drm_device.struct_mutex.
*
* Frees the object
*/
@@ -813,7 +813,7 @@ EXPORT_SYMBOL(drm_gem_object_free);
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must not hold the
- * dev->struct_mutex lock when calling this function.
+ * &drm_device.struct_mutex lock when calling this function.
*
* See also __drm_gem_object_unreference().
*/
@@ -840,9 +840,9 @@ EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
* drm_gem_object_unreference - release a GEM BO reference
* @obj: GEM buffer object
*
- * This releases a reference to @obj. Callers must hold the dev->struct_mutex
- * lock when calling this function, even when the driver doesn't use
- * dev->struct_mutex for anything.
+ * This releases a reference to @obj. Callers must hold the
+ * &drm_device.struct_mutex lock when calling this function, even when the
+ * driver doesn't use &drm_device.struct_mutex for anything.
*
* For drivers not encumbered with legacy locking use
* drm_gem_object_unreference_unlocked() instead.
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 33cd51632721..f7ba32bfe39b 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -176,8 +176,8 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
*
* This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
- * Drivers using the CMA helpers should set this as their DRM driver's
- * ->gem_free_object() callback.
+ * Drivers using the CMA helpers should set this as their
+ * &drm_driver.gem_free_object callback.
*/
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
* This aligns the pitch and size arguments to the minimum required. This is
* an internal helper that can be wrapped by a driver to account for hardware
* with more specific alignment requirements. It should not be used directly
- * as the ->dumb_create() callback in a DRM driver.
+ * as their &drm_driver.dumb_create callback.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -240,7 +240,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
* This function computes the pitch of the dumb buffer and rounds it up to an
* integer number of bytes per pixel. Drivers for hardware that doesn't have
* any additional restrictions on the pitch can directly use this function as
- * their ->dumb_create() callback.
+ * their &drm_driver.dumb_create callback.
*
* For hardware with additional restrictions, drivers can adjust the fields
* set up by userspace and pass the IOCTL data along to the
@@ -274,7 +274,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
*
 * This function looks up an object by its handle and returns the fake mmap
* offset associated with it. Drivers using the CMA helpers should set this
- * as their DRM driver's ->dumb_map_offset() callback.
+ * as their &drm_driver.dumb_map_offset callback.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -358,6 +358,77 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+#ifndef CONFIG_MMU
+/**
+ * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
+ * @filp: file object
+ * @addr: memory address
+ * @len: buffer size
+ * @pgoff: page offset
+ * @flags: memory flags
+ *
+ * This function is used on noMMU platforms to propose an address mapping
+ * for a given buffer. It's intended to be used as a direct handler for the
+ * &file_operations.get_unmapped_area operation.
+ *
+ * Returns:
+ * mapping address on success or a negative error code on failure.
+ */
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *obj = NULL;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_vma_offset_node *node;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ pgoff,
+ len >> PAGE_SHIFT);
+ if (likely(node)) {
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ /*
+ * When the object is being freed, after it hits 0-refcnt it
+ * proceeds to tear down the object. In the process it will
+ * attempt to remove the VMA offset and so acquire this
+ * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
+ * that matches our range, we know it is in the process of being
+ * destroyed and will be freed as soon as we release the lock -
+ * so we have to check for the 0-refcnted object and treat it as
+ * invalid.
+ */
+ if (!kref_get_unless_zero(&obj->refcount))
+ obj = NULL;
+ }
+
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+ if (!obj)
+ return -EINVAL;
+
+ if (!drm_vma_node_is_allowed(node, priv)) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EACCES;
+ }
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
+#endif
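
A noMMU driver would then hook this up in its file_operations; a sketch of the relevant entries (foo_fops is illustrative, the rest of the table is elided):

	static const struct file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.mmap		= drm_gem_cma_mmap,
	#ifndef CONFIG_MMU
		.get_unmapped_area = drm_gem_cma_get_unmapped_area,
	#endif
	};
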
+
#ifdef CONFIG_DEBUG_FS
/**
* drm_gem_cma_describe - describe a CMA GEM object for debugfs
@@ -391,7 +462,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
*
* This function exports a scatter/gather table suitable for PRIME usage by
* calling the standard DMA mapping API. Drivers using the CMA helpers should
- * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
+ * set this as their &drm_driver.gem_prime_get_sg_table callback.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
@@ -429,8 +500,8 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
* This function imports a scatter/gather table exported via DMA-BUF by
* another driver. Imported buffers must be physically contiguous in memory
* (i.e. the scatter/gather table must contain a single entry). Drivers that
- * use the CMA helpers should set this as their DRM driver's
- * ->gem_prime_import_sg_table() callback.
+ * use the CMA helpers should set this as their
+ * &drm_driver.gem_prime_import_sg_table callback.
*
* Returns:
* A pointer to a newly created GEM object or an ERR_PTR-encoded negative
@@ -467,7 +538,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
*
* This function maps a buffer imported via DRM PRIME into a userspace
* process's address space. Drivers that use the CMA helpers should set this
- * as their DRM driver's ->gem_prime_mmap() callback.
+ * as their &drm_driver.gem_prime_mmap callback.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -496,7 +567,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* virtual address space. Since the CMA buffers are already mapped into the
* kernel virtual address space this simply returns the cached virtual
* address. Drivers using the CMA helpers should set this as their DRM
- * driver's ->gem_prime_vmap() callback.
+ * driver's &drm_driver.gem_prime_vmap callback.
*
* Returns:
* The kernel virtual address of the CMA GEM object's backing store.
@@ -518,7 +589,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
* This function removes a buffer exported via DRM PRIME from the kernel's
* virtual address space. This is a no-op because CMA buffers cannot be
* unmapped from kernel space. Drivers using the CMA helpers should set this
- * as their DRM driver's ->gem_prime_vunmap() callback.
+ * as their &drm_driver.gem_prime_vunmap callback.
*/
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index b404287abb97..b2dc21e33ae0 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -63,6 +63,18 @@ void drm_global_release(void)
}
}
+/**
+ * drm_global_item_ref - Initialize and acquire reference to a memory object
+ * @ref: Object for initialization
+ *
+ * This initializes a memory object, allocating memory and calling the
+ * .init() hook. Further calls will increase the reference count for
+ * that item.
+ *
+ * Returns:
+ * Zero on success, non-zero otherwise.
+ */
int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret = 0;
@@ -97,6 +109,17 @@ error_unlock:
}
EXPORT_SYMBOL(drm_global_item_ref);
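
These entry points are used almost exclusively by TTM-based drivers; the usual pairing looks roughly like this (sketch — the foo_* init/release hooks are assumed driver code):

	static struct drm_global_reference mem_ref = {
		.global_type = DRM_GLOBAL_TTM_MEM,
		.size = sizeof(struct ttm_mem_global),
		.init = &foo_ttm_mem_global_init,
		.release = &foo_ttm_mem_global_release,
	};

	static int foo_global_init(void)
	{
		int ret;

		ret = drm_global_item_ref(&mem_ref); /* first ref runs .init() */
		if (ret)
			return ret;
		/* ... use mem_ref.object ... */
		return 0;
	}

	static void foo_global_fini(void)
	{
		drm_global_item_unref(&mem_ref); /* last unref runs .release() */
	}
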
+/**
+ * drm_global_item_unref - Drop a reference to a memory object
+ * @ref: Object being removed
+ *
+ * Drop a reference to the memory object and eventually call the
+ * release() hook. The allocated object should be dropped in the
+ * release() hook or before calling this function.
+ */
void drm_global_item_unref(struct drm_global_reference *ref)
{
struct drm_global_item *item = &glob[ref->global_type];
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index db80ec860e33..f37388cb2fde 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -31,6 +31,7 @@ void drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void drm_pci_agp_destroy(struct drm_device *dev);
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
@@ -58,10 +59,10 @@ extern unsigned int drm_timestamp_monotonic;
/* IOCTLS */
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
-int drm_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int drm_legacy_irq_control(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_legacy_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* drm_auth.c */
int drm_getmagic(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index fed22c2b98b6..a7c61c23685a 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -95,9 +95,6 @@
* broken.
*/
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
/*
* Get the bus id.
*
@@ -115,11 +112,15 @@ static int drm_getunique(struct drm_device *dev, void *data,
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
+ mutex_lock(&master->dev->master_mutex);
if (u->unique_len >= master->unique_len) {
- if (copy_to_user(u->unique, master->unique, master->unique_len))
+ if (copy_to_user(u->unique, master->unique, master->unique_len)) {
+ mutex_unlock(&master->dev->master_mutex);
return -EFAULT;
+ }
}
u->unique_len = master->unique_len;
+ mutex_unlock(&master->dev->master_mutex);
return 0;
}
@@ -340,6 +341,7 @@ static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *f
struct drm_set_version *sv = data;
int if_version, retcode = 0;
+ mutex_lock(&dev->master_mutex);
if (sv->drm_di_major != -1) {
if (sv->drm_di_major != DRM_IF_MAJOR ||
sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
@@ -374,6 +376,7 @@ done:
sv->drm_di_minor = DRM_IF_MINOR;
sv->drm_dd_major = dev->driver->major;
sv->drm_dd_minor = dev->driver->minor;
+ mutex_unlock(&dev->master_mutex);
return retcode;
}
@@ -475,15 +478,17 @@ static int drm_version(struct drm_device *dev, void *data,
return err;
}
-/*
+/**
* drm_ioctl_permit - Check ioctl permissions against caller
*
* @flags: ioctl permission flags.
* @file_priv: Pointer to struct drm_file identifying the caller.
*
* Checks whether the caller is allowed to run an ioctl with the
- * indicated permissions. If so, returns zero. Otherwise returns an
- * error code suitable for ioctl return.
+ * indicated permissions.
+ *
+ * Returns:
+ * Zero if allowed, -EACCES otherwise.
*/
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
@@ -528,15 +533,15 @@ EXPORT_SYMBOL(drm_ioctl_permit);
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -575,7 +580,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -593,7 +598,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -729,9 +734,8 @@ long drm_ioctl(struct file *filp,
if (ksize > in_size)
memset(kdata + in_size, 0, ksize - in_size);
- /* Enforce sane locking for modern driver ioctls. Core ioctls are
- * too messy still. */
- if ((!drm_core_check_feature(dev, DRIVER_LEGACY) && is_driver_ioctl) ||
+ /* Enforce sane locking for modern driver ioctls. */
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY) ||
(ioctl->flags & DRM_UNLOCKED))
retcode = func(dev, kdata, file_priv);
else {
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 273625a85036..e06cf11ebb4a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -95,7 +95,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
*
* Only to be called from drm_crtc_vblank_on().
*
- * Note: caller must hold dev->vbl_lock since this reads & writes
+ * Note: caller must hold &drm_device.vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe)
@@ -142,7 +142,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
* Only necessary when going from off->on, to account for frames we
* didn't get an interrupt for.
*
- * Note: caller must hold dev->vbl_lock since this reads & writes
+ * Note: caller must hold &drm_device.vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
@@ -415,29 +415,6 @@ err:
}
EXPORT_SYMBOL(drm_vblank_init);
-static void drm_irq_vgaarb_nokms(void *cookie, bool state)
-{
- struct drm_device *dev = cookie;
-
- if (dev->driver->vgaarb_irq) {
- dev->driver->vgaarb_irq(dev, state);
- return;
- }
-
- if (!dev->irq_enabled)
- return;
-
- if (state) {
- if (dev->driver->irq_uninstall)
- dev->driver->irq_uninstall(dev);
- } else {
- if (dev->driver->irq_preinstall)
- dev->driver->irq_preinstall(dev);
- if (dev->driver->irq_postinstall)
- dev->driver->irq_postinstall(dev);
- }
-}
-
/**
* drm_irq_install - install IRQ handler
* @dev: DRM device
@@ -449,7 +426,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
*
* This is the simplified helper interface provided for drivers with no special
* needs. Drivers which need to install interrupt handlers for multiple
- * interrupts must instead set drm_device->irq_enabled to signal the DRM core
+ * interrupts must instead set &drm_device.irq_enabled to signal the DRM core
* that vblank interrupts are available.
*
* Returns:
@@ -492,9 +469,6 @@ int drm_irq_install(struct drm_device *dev, int irq)
return ret;
}
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
-
/* After installing handler */
if (dev->driver->irq_postinstall)
ret = dev->driver->irq_postinstall(dev);
@@ -519,7 +493,7 @@ EXPORT_SYMBOL(drm_irq_install);
* Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
* This should only be called by drivers which used drm_irq_install() to set up
* their interrupt handler. Other drivers must only reset
- * drm_device->irq_enabled to false.
+ * &drm_device.irq_enabled to false.
*
* Note that for kernel modesetting drivers it is a bug if this function fails.
* The sanity checks are only to catch buggy user modesetting drivers which call
@@ -579,19 +553,8 @@ int drm_irq_uninstall(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_irq_uninstall);
-/*
- * IRQ control ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_control structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls irq_install() or irq_uninstall() according to \p arg.
- */
-int drm_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_irq_control(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_control *ctl = data;
int ret = 0, irq;
@@ -993,12 +956,11 @@ static void send_vblank_event(struct drm_device *dev,
* period. This helper function implements exactly the required vblank arming
* behaviour.
*
- * NOTE: Drivers using this to send out the event in struct &drm_crtc_state
- * as part of an atomic commit must ensure that the next vblank happens at
- * exactly the same time as the atomic commit is committed to the hardware. This
- * function itself does **not** protect again the next vblank interrupt racing
- * with either this function call or the atomic commit operation. A possible
- * sequence could be:
+ * NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
+ * atomic commit must ensure that the next vblank happens at exactly the same
+ * time as the atomic commit is committed to the hardware. This function itself
+ * does **not** protect against the next vblank interrupt racing with either this
+ * function call or the atomic commit operation. A possible sequence could be:
*
* 1. Driver commits new hardware state into vblank-synchronized registers.
* 2. A vblank happens, committing the hardware state. Also the corresponding
@@ -1442,19 +1404,8 @@ static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
}
}
-/*
- * drm_modeset_ctl - handle vblank event counter changes across mode switch
- * @DRM_IOCTL_ARGS: standard ioctl arguments
- *
- * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
- * ioctls around modesetting so that any lost vblank events are accounted for.
- *
- * Generally the counter will reset across mode sets. If interrupts are
- * enabled around this call, we don't have to do anything since the counter
- * will have already been incremented.
- */
-int drm_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
unsigned int pipe;
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c6f422e879dd..e4bb5ad747c8 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -74,7 +74,14 @@ int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+#ifdef CONFIG_DRM_VM
void drm_legacy_vma_flush(struct drm_device *d);
+#else
+static inline void drm_legacy_vma_flush(struct drm_device *d)
+{
+ /* do nothing */
+}
+#endif
/*
* AGP Support
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index ca1e344f318d..8bfb0b327267 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -1,6 +1,7 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * Copyright 2016 Intel Corporation
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,9 +32,9 @@
* class implementation for more advanced memory managers.
*
* Note that the algorithm used is quite simple and there might be substantial
- * performance gains if a smarter free list is implemented. Currently it is just an
- * unordered stack of free regions. This could easily be improved if an RB-tree
- * is used instead. At least if we expect heavy fragmentation.
+ * performance gains if a smarter free list is implemented. Currently it is
+ * just an unordered stack of free regions. This could easily be improved if
+ * an RB-tree is used instead. At least if we expect heavy fragmentation.
*
* Aligned allocations can also see improvement.
*
@@ -58,8 +59,8 @@
*
* The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
* Drivers are free to embed either of them into their own suitable
- * datastructures. drm_mm itself will not do any allocations of its own, so if
- * drivers choose not to embed nodes they need to still allocate them
+ * datastructures. drm_mm itself will not do any memory allocations of its own,
+ * so if drivers choose not to embed nodes they need to still allocate them
* themselves.
*
* The range allocator also supports reservation of preallocated blocks. This is
@@ -67,7 +68,7 @@
* where an object needs to be created which exactly matches the firmware's
* scanout target. As long as the range is still free it can be inserted anytime
* after the allocator is initialized, which helps with avoiding looped
- * depencies in the driver load sequence.
+ * dependencies in the driver load sequence.
*
* drm_mm maintains a stack of most recently freed holes, which of all
* simplistic datastructures seems to be a fairly decent approach to clustering
@@ -77,33 +78,25 @@
* steep cliff not a real concern. Removing a node again is O(1).
*
* drm_mm supports a few features: Alignment and range restrictions can be
- * supplied. Further more every &drm_mm_node has a color value (which is just an
- * opaqua unsigned long) which in conjunction with a driver callback can be used
+ * supplied. Furthermore every &drm_mm_node has a color value (which is just an
+ * opaque unsigned long) which in conjunction with a driver callback can be used
* to implement sophisticated placement restrictions. The i915 DRM driver uses
* this to implement guard pages between incompatible caching domains in the
* graphics TT.
*
- * Two behaviors are supported for searching and allocating: bottom-up and top-down.
- * The default is bottom-up. Top-down allocation can be used if the memory area
- * has different restrictions, or just to reduce fragmentation.
+ * Two behaviors are supported for searching and allocating: bottom-up and
+ * top-down. The default is bottom-up. Top-down allocation can be used if the
+ * memory area has different restrictions, or just to reduce fragmentation.
*
* Finally iteration helpers to walk all nodes and all holes are provided as are
* some basic allocator dumpers for debugging.
+ *
+ * Note that this range allocator is not thread-safe; drivers need to protect
+ * modifications with their own locking. The idea behind this is that for a full
+ * memory manager additional data needs to be protected anyway, hence internal
+ * locking would be fully redundant.
*/
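
With the rbtree rework below, the old search-then-insert split collapses into a single ranged call; a sketch of the new-style usage (the sizes and range are arbitrary):

	struct drm_mm mm;
	struct drm_mm_node node = {};	/* must be zeroed */
	int ret;

	drm_mm_init(&mm, 0, SZ_1G);

	/* best-fit search, 4 KiB alignment, anywhere in [0, 1G) */
	ret = drm_mm_insert_node_in_range(&mm, &node, SZ_64K, SZ_4K,
					  0 /* color */, 0, SZ_1G,
					  DRM_MM_INSERT_BEST);
	if (!ret) {
		/* node.start now holds the assigned offset */
		drm_mm_remove_node(&node);
	}
	drm_mm_takedown(&mm);
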
-static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- u64 size,
- unsigned alignment,
- unsigned long color,
- enum drm_mm_search_flags flags);
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
- u64 size,
- unsigned alignment,
- unsigned long color,
- u64 start,
- u64 end,
- enum drm_mm_search_flags flags);
-
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
@@ -138,7 +131,7 @@ static void show_leaks(struct drm_mm *mm)
if (!buf)
return;
- list_for_each_entry(node, &mm->head_node.node_list, node_list) {
+ list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
struct stack_trace trace = {
.entries = entries,
.max_entries = STACKDEPTH
@@ -174,9 +167,9 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
START, LAST, static inline, drm_mm_interval_tree)
struct drm_mm_node *
-__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
- return drm_mm_interval_tree_iter_first(&mm->interval_tree,
+ return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);
@@ -225,66 +218,151 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
&drm_mm_interval_tree_augment);
}
-static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
- struct drm_mm_node *node,
- u64 size, unsigned alignment,
- unsigned long color,
- enum drm_mm_allocator_flags flags)
+#define RB_INSERT(root, member, expr) do { \
+ struct rb_node **link = &root.rb_node, *rb = NULL; \
+ u64 x = expr(node); \
+ while (*link) { \
+ rb = *link; \
+ if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
+ link = &rb->rb_left; \
+ else \
+ link = &rb->rb_right; \
+ } \
+ rb_link_node(&node->member, rb, link); \
+ rb_insert_color(&node->member, &root); \
+} while (0)
+
+#define HOLE_SIZE(NODE) ((NODE)->hole_size)
+#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+
+static void add_hole(struct drm_mm_node *node)
{
- struct drm_mm *mm = hole_node->mm;
- u64 hole_start = drm_mm_hole_node_start(hole_node);
- u64 hole_end = drm_mm_hole_node_end(hole_node);
- u64 adj_start = hole_start;
- u64 adj_end = hole_end;
+ struct drm_mm *mm = node->mm;
- BUG_ON(node->allocated);
+ node->hole_size =
+ __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+ DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
- if (mm->color_adjust)
- mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+ RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+ RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
+
+ list_add(&node->hole_stack, &mm->hole_stack);
+}
- if (flags & DRM_MM_CREATE_TOP)
- adj_start = adj_end - size;
+static void rm_hole(struct drm_mm_node *node)
+{
+ DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
- if (alignment) {
- u64 tmp = adj_start;
- unsigned rem;
+ list_del(&node->hole_stack);
+ rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+ rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+ node->hole_size = 0;
- rem = do_div(tmp, alignment);
- if (rem) {
- if (flags & DRM_MM_CREATE_TOP)
- adj_start -= rem;
- else
- adj_start += alignment - rem;
+ DRM_MM_BUG_ON(drm_mm_hole_follows(node));
+}
+
+static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
+{
+ return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
+}
+
+static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
+{
+ return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
+}
+
+static inline u64 rb_hole_size(struct rb_node *rb)
+{
+ return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
+{
+ struct rb_node *best = NULL;
+ struct rb_node **link = &mm->holes_size.rb_node;
+
+ while (*link) {
+ struct rb_node *rb = *link;
+
+ if (size <= rb_hole_size(rb)) {
+ link = &rb->rb_left;
+ best = rb;
+ } else {
+ link = &rb->rb_right;
}
}
- BUG_ON(adj_start < hole_start);
- BUG_ON(adj_end > hole_end);
+ return rb_hole_size_to_node(best);
+}
+
+static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
+{
+ struct drm_mm_node *node = NULL;
+ struct rb_node **link = &mm->holes_addr.rb_node;
- if (adj_start == hole_start) {
- hole_node->hole_follows = 0;
- list_del(&hole_node->hole_stack);
+ while (*link) {
+ u64 hole_start;
+
+ node = rb_hole_addr_to_node(*link);
+ hole_start = __drm_mm_hole_node_start(node);
+
+ if (addr < hole_start)
+ link = &node->rb_hole_addr.rb_left;
+ else if (addr > hole_start + node->hole_size)
+ link = &node->rb_hole_addr.rb_right;
+ else
+ break;
}
- node->start = adj_start;
- node->size = size;
- node->mm = mm;
- node->color = color;
- node->allocated = 1;
+ return node;
+}
+
+static struct drm_mm_node *
+first_hole(struct drm_mm *mm,
+ u64 start, u64 end, u64 size,
+ enum drm_mm_insert_mode mode)
+{
+ if (RB_EMPTY_ROOT(&mm->holes_size))
+ return NULL;
- list_add(&node->node_list, &hole_node->node_list);
+ switch (mode) {
+ default:
+ case DRM_MM_INSERT_BEST:
+ return best_hole(mm, size);
- drm_mm_interval_tree_add_node(hole_node, node);
+ case DRM_MM_INSERT_LOW:
+ return find_hole(mm, start);
- BUG_ON(node->start + node->size > adj_end);
+ case DRM_MM_INSERT_HIGH:
+ return find_hole(mm, end);
- node->hole_follows = 0;
- if (__drm_mm_hole_node_start(node) < hole_end) {
- list_add(&node->hole_stack, &mm->hole_stack);
- node->hole_follows = 1;
+ case DRM_MM_INSERT_EVICT:
+ return list_first_entry_or_null(&mm->hole_stack,
+ struct drm_mm_node,
+ hole_stack);
}
+}
- save_stack(node);
+static struct drm_mm_node *
+next_hole(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ enum drm_mm_insert_mode mode)
+{
+ switch (mode) {
+ default:
+ case DRM_MM_INSERT_BEST:
+ return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+
+ case DRM_MM_INSERT_LOW:
+ return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+
+ case DRM_MM_INSERT_HIGH:
+ return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+
+ case DRM_MM_INSERT_EVICT:
+ node = list_next_entry(node, hole_stack);
+ return &node->hole_stack == &mm->hole_stack ? NULL : node;
+ }
}
/**
@@ -292,11 +370,11 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
* @mm: drm_mm allocator to insert @node into
* @node: drm_mm_node to insert
*
- * This functions inserts an already set-up drm_mm_node into the allocator,
- * meaning that start, size and color must be set by the caller. This is useful
- * to initialize the allocator with preallocated objects which must be set-up
- * before the range allocator can be set-up, e.g. when taking over a firmware
- * framebuffer.
+ * This functions inserts an already set-up &drm_mm_node into the allocator,
+ * meaning that start, size and color must be set by the caller. All other
+ * fields must be cleared to 0. This is useful to initialize the allocator with
+ * preallocated objects which must be set-up before the range allocator can be
+ * set-up, e.g. when taking over a firmware framebuffer.
*
* Returns:
* 0 on success, -ENOSPC if there's no hole where @node is.
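
The firmware-framebuffer case called out above is the canonical user; a sketch under the new rules (start and size set, all other fields zeroed):

	static int foo_reserve_fw_fb(struct drm_mm *mm, u64 fw_base, u64 fw_size)
	{
		static struct drm_mm_node fw_fb;	/* static, hence zeroed */

		fw_fb.start = fw_base;
		fw_fb.size = fw_size;
		return drm_mm_reserve_node(mm, &fw_fb); /* -ENOSPC if occupied */
	}
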
@@ -308,28 +386,17 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
u64 hole_start, hole_end;
u64 adj_start, adj_end;
- if (WARN_ON(node->size == 0))
- return -EINVAL;
-
end = node->start + node->size;
+ if (unlikely(end <= node->start))
+ return -ENOSPC;
/* Find the relevant hole to add our node to */
- hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
- node->start, ~(u64)0);
- if (hole) {
- if (hole->start < end)
- return -ENOSPC;
- } else {
- hole = list_entry(&mm->head_node.node_list,
- typeof(*hole), node_list);
- }
-
- hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
- if (!hole->hole_follows)
+ hole = find_hole(mm, node->start);
+ if (!hole)
return -ENOSPC;
adj_start = hole_start = __drm_mm_hole_node_start(hole);
- adj_end = hole_end = __drm_mm_hole_node_end(hole);
+ adj_end = hole_end = hole_start + hole->hole_size;
if (mm->color_adjust)
mm->color_adjust(hole, node->color, &adj_start, &adj_end);
@@ -338,174 +405,130 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
return -ENOSPC;
node->mm = mm;
- node->allocated = 1;
list_add(&node->node_list, &hole->node_list);
-
drm_mm_interval_tree_add_node(hole, node);
+ node->allocated = true;
+ node->hole_size = 0;
- if (node->start == hole_start) {
- hole->hole_follows = 0;
- list_del(&hole->hole_stack);
- }
-
- node->hole_follows = 0;
- if (end != hole_end) {
- list_add(&node->hole_stack, &mm->hole_stack);
- node->hole_follows = 1;
- }
+ rm_hole(hole);
+ if (node->start > hole_start)
+ add_hole(hole);
+ if (end < hole_end)
+ add_hole(node);
save_stack(node);
-
return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
/**
- * drm_mm_insert_node_generic - search for space and insert @node
+ * drm_mm_insert_node_in_range - ranged search for space and insert @node
* @mm: drm_mm to allocate from
* @node: preallocate node to insert
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for this node
- * @sflags: flags to fine-tune the allocation search
- * @aflags: flags to fine-tune the allocation behavior
+ * @range_start: start of the allowed range for this node
+ * @range_end: end of the allowed range for this node
+ * @mode: fine-tune the allocation search and placement
*
- * The preallocated node must be cleared to 0.
+ * The preallocated @node must be cleared to 0.
*
* Returns:
* 0 on success, -ENOSPC if there's no suitable hole.
*/
-int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
- u64 size, unsigned alignment,
- unsigned long color,
- enum drm_mm_search_flags sflags,
- enum drm_mm_allocator_flags aflags)
+int drm_mm_insert_node_in_range(struct drm_mm * const mm,
+ struct drm_mm_node * const node,
+ u64 size, u64 alignment,
+ unsigned long color,
+ u64 range_start, u64 range_end,
+ enum drm_mm_insert_mode mode)
{
- struct drm_mm_node *hole_node;
+ struct drm_mm_node *hole;
+ u64 remainder_mask;
- if (WARN_ON(size == 0))
- return -EINVAL;
+ DRM_MM_BUG_ON(range_start >= range_end);
- hole_node = drm_mm_search_free_generic(mm, size, alignment,
- color, sflags);
- if (!hole_node)
+ if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_insert_node_generic);
-
-static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
- struct drm_mm_node *node,
- u64 size, unsigned alignment,
- unsigned long color,
- u64 start, u64 end,
- enum drm_mm_allocator_flags flags)
-{
- struct drm_mm *mm = hole_node->mm;
- u64 hole_start = drm_mm_hole_node_start(hole_node);
- u64 hole_end = drm_mm_hole_node_end(hole_node);
- u64 adj_start = hole_start;
- u64 adj_end = hole_end;
+ if (alignment <= 1)
+ alignment = 0;
- BUG_ON(!hole_node->hole_follows || node->allocated);
+ remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
+ for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
+ hole = next_hole(mm, hole, mode)) {
+ u64 hole_start = __drm_mm_hole_node_start(hole);
+ u64 hole_end = hole_start + hole->hole_size;
+ u64 adj_start, adj_end;
+ u64 col_start, col_end;
- if (adj_start < start)
- adj_start = start;
- if (adj_end > end)
- adj_end = end;
+ if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
+ break;
- if (mm->color_adjust)
- mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+ if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
+ break;
- if (flags & DRM_MM_CREATE_TOP)
- adj_start = adj_end - size;
+ col_start = hole_start;
+ col_end = hole_end;
+ if (mm->color_adjust)
+ mm->color_adjust(hole, color, &col_start, &col_end);
- if (alignment) {
- u64 tmp = adj_start;
- unsigned rem;
+ adj_start = max(col_start, range_start);
+ adj_end = min(col_end, range_end);
- rem = do_div(tmp, alignment);
- if (rem) {
- if (flags & DRM_MM_CREATE_TOP)
- adj_start -= rem;
- else
- adj_start += alignment - rem;
- }
- }
+ if (adj_end <= adj_start || adj_end - adj_start < size)
+ continue;
- if (adj_start == hole_start) {
- hole_node->hole_follows = 0;
- list_del(&hole_node->hole_stack);
- }
+ if (mode == DRM_MM_INSERT_HIGH)
+ adj_start = adj_end - size;
- node->start = adj_start;
- node->size = size;
- node->mm = mm;
- node->color = color;
- node->allocated = 1;
+ if (alignment) {
+ u64 rem;
- list_add(&node->node_list, &hole_node->node_list);
+ if (likely(remainder_mask))
+ rem = adj_start & remainder_mask;
+ else
+ div64_u64_rem(adj_start, alignment, &rem);
+ if (rem) {
+ adj_start -= rem;
+ if (mode != DRM_MM_INSERT_HIGH)
+ adj_start += alignment;
- drm_mm_interval_tree_add_node(hole_node, node);
+ if (adj_start < max(col_start, range_start) ||
+ min(col_end, range_end) - adj_start < size)
+ continue;
- BUG_ON(node->start < start);
- BUG_ON(node->start < adj_start);
- BUG_ON(node->start + node->size > adj_end);
- BUG_ON(node->start + node->size > end);
+ if (adj_end <= adj_start ||
+ adj_end - adj_start < size)
+ continue;
+ }
+ }
- node->hole_follows = 0;
- if (__drm_mm_hole_node_start(node) < hole_end) {
- list_add(&node->hole_stack, &mm->hole_stack);
- node->hole_follows = 1;
- }
+ node->mm = mm;
+ node->size = size;
+ node->start = adj_start;
+ node->color = color;
+ node->hole_size = 0;
- save_stack(node);
-}
+ list_add(&node->node_list, &hole->node_list);
+ drm_mm_interval_tree_add_node(hole, node);
+ node->allocated = true;
-/**
- * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
- * @mm: drm_mm to allocate from
- * @node: preallocate node to insert
- * @size: size of the allocation
- * @alignment: alignment of the allocation
- * @color: opaque tag value to use for this node
- * @start: start of the allowed range for this node
- * @end: end of the allowed range for this node
- * @sflags: flags to fine-tune the allocation search
- * @aflags: flags to fine-tune the allocation behavior
- *
- * The preallocated node must be cleared to 0.
- *
- * Returns:
- * 0 on success, -ENOSPC if there's no suitable hole.
- */
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
- u64 size, unsigned alignment,
- unsigned long color,
- u64 start, u64 end,
- enum drm_mm_search_flags sflags,
- enum drm_mm_allocator_flags aflags)
-{
- struct drm_mm_node *hole_node;
+ rm_hole(hole);
+ if (adj_start > hole_start)
+ add_hole(hole);
+ if (adj_start + size < hole_end)
+ add_hole(node);
- if (WARN_ON(size == 0))
- return -EINVAL;
-
- hole_node = drm_mm_search_free_in_range_generic(mm,
- size, alignment, color,
- start, end, sflags);
- if (!hole_node)
- return -ENOSPC;
+ save_stack(node);
+ return 0;
+ }
- drm_mm_insert_helper_range(hole_node, node,
- size, alignment, color,
- start, end, aflags);
- return 0;
+ return -ENOSPC;
}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
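
[Editor's note: a usage sketch of the combined search-and-insert interface above; the size, alignment and range values (SZ_* constants from linux/sizes.h) are illustrative only.]

	struct drm_mm_node node = {};	/* preallocated node must be cleared to 0 */
	int err;

	/* a 64 KiB block, 4 KiB aligned, placed as low as possible in [0, 1 GiB) */
	err = drm_mm_insert_node_in_range(&mm, &node, SZ_64K, SZ_4K,
					  0 /* color */, 0, SZ_1G,
					  DRM_MM_INSERT_LOW);
	if (err)
		return err;		/* -ENOSPC: no suitable hole in the range */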
/**
* drm_mm_remove_node - Remove a memory node from the allocator.
@@ -513,150 +536,30 @@ EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
*
* This just removes a node from its drm_mm allocator. The node does not need to
* be cleared again before it can be re-inserted into this or any other drm_mm
- * allocator. It is a bug to call this function on a un-allocated node.
+ * allocator. It is a bug to call this function on an unallocated node.
*/
void drm_mm_remove_node(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- if (WARN_ON(!node->allocated))
- return;
-
- BUG_ON(node->scanned_block || node->scanned_prev_free
- || node->scanned_next_free);
-
- prev_node =
- list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+ DRM_MM_BUG_ON(!node->allocated);
+ DRM_MM_BUG_ON(node->scanned_block);
- if (node->hole_follows) {
- BUG_ON(__drm_mm_hole_node_start(node) ==
- __drm_mm_hole_node_end(node));
- list_del(&node->hole_stack);
- } else
- BUG_ON(__drm_mm_hole_node_start(node) !=
- __drm_mm_hole_node_end(node));
+ prev_node = list_prev_entry(node, node_list);
-
- if (!prev_node->hole_follows) {
- prev_node->hole_follows = 1;
- list_add(&prev_node->hole_stack, &mm->hole_stack);
- } else
- list_move(&prev_node->hole_stack, &mm->hole_stack);
+ if (drm_mm_hole_follows(node))
+ rm_hole(node);
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
- node->allocated = 0;
-}
-EXPORT_SYMBOL(drm_mm_remove_node);
-
-static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
-{
- if (end - start < size)
- return 0;
-
- if (alignment) {
- u64 tmp = start;
- unsigned rem;
-
- rem = do_div(tmp, alignment);
- if (rem)
- start += alignment - rem;
- }
-
- return end >= start + size;
-}
+ node->allocated = false;
-static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- u64 size,
- unsigned alignment,
- unsigned long color,
- enum drm_mm_search_flags flags)
-{
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
- u64 adj_start;
- u64 adj_end;
- u64 best_size;
-
- BUG_ON(mm->scanned_blocks);
-
- best = NULL;
- best_size = ~0UL;
-
- __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
- flags & DRM_MM_SEARCH_BELOW) {
- u64 hole_size = adj_end - adj_start;
-
- if (mm->color_adjust) {
- mm->color_adjust(entry, color, &adj_start, &adj_end);
- if (adj_end <= adj_start)
- continue;
- }
-
- if (!check_free_hole(adj_start, adj_end, size, alignment))
- continue;
-
- if (!(flags & DRM_MM_SEARCH_BEST))
- return entry;
-
- if (hole_size < best_size) {
- best = entry;
- best_size = hole_size;
- }
- }
-
- return best;
-}
-
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
- u64 size,
- unsigned alignment,
- unsigned long color,
- u64 start,
- u64 end,
- enum drm_mm_search_flags flags)
-{
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
- u64 adj_start;
- u64 adj_end;
- u64 best_size;
-
- BUG_ON(mm->scanned_blocks);
-
- best = NULL;
- best_size = ~0UL;
-
- __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
- flags & DRM_MM_SEARCH_BELOW) {
- u64 hole_size = adj_end - adj_start;
-
- if (adj_start < start)
- adj_start = start;
- if (adj_end > end)
- adj_end = end;
-
- if (mm->color_adjust) {
- mm->color_adjust(entry, color, &adj_start, &adj_end);
- if (adj_end <= adj_start)
- continue;
- }
-
- if (!check_free_hole(adj_start, adj_end, size, alignment))
- continue;
-
- if (!(flags & DRM_MM_SEARCH_BEST))
- return entry;
-
- if (hole_size < best_size) {
- best = entry;
- best_size = hole_size;
- }
- }
-
- return best;
+ if (drm_mm_hole_follows(prev_node))
+ rm_hole(prev_node);
+ add_hole(prev_node);
}
+EXPORT_SYMBOL(drm_mm_remove_node);
/**
* drm_mm_replace_node - move an allocation from @old to @new
@@ -669,119 +572,114 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
+ DRM_MM_BUG_ON(!old->allocated);
+
+ *new = *old;
+
list_replace(&old->node_list, &new->node_list);
- list_replace(&old->hole_stack, &new->hole_stack);
rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
- new->hole_follows = old->hole_follows;
- new->mm = old->mm;
- new->start = old->start;
- new->size = old->size;
- new->color = old->color;
- new->__subtree_last = old->__subtree_last;
-
- old->allocated = 0;
- new->allocated = 1;
+
+ if (drm_mm_hole_follows(old)) {
+ list_replace(&old->hole_stack, &new->hole_stack);
+ rb_replace_node(&old->rb_hole_size,
+ &new->rb_hole_size,
+ &old->mm->holes_size);
+ rb_replace_node(&old->rb_hole_addr,
+ &new->rb_hole_addr,
+ &old->mm->holes_addr);
+ }
+
+ old->allocated = false;
+ new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);
/**
- * DOC: lru scan roaster
+ * DOC: lru scan roster
*
* Very often GPUs need to have continuous allocations for a given object. When
* evicting objects to make space for a new one it is therefore not most
* efficient when we simply start to select all objects from the tail of an LRU
* until there's a suitable hole: Especially for big objects or nodes that
* otherwise have special allocation constraints there's a good chance we evict
- * lots of (smaller) objects unecessarily.
+ * lots of (smaller) objects unnecessarily.
*
* The DRM range allocator supports this use-case through the scanning
* interfaces. First a scan operation needs to be initialized with
- * drm_mm_init_scan() or drm_mm_init_scan_with_range(). The the driver adds
- * objects to the roaster (probably by walking an LRU list, but this can be
- * freely implemented) until a suitable hole is found or there's no further
- * evitable object.
- *
- * The the driver must walk through all objects again in exactly the reverse
+ * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
+ * objects to the roster, probably by walking an LRU list, but this can be
+ * freely implemented. Eviction candidates are added using
+ * drm_mm_scan_add_block() until a suitable hole is found or there are no
+ * further evictable objects. Eviction roster metadata is tracked in &struct
+ * drm_mm_scan.
+ *
+ * The driver must walk through all objects again in exactly the reverse
* order to restore the allocator state. Note that while the allocator is used
* in the scan mode no other operation is allowed.
*
- * Finally the driver evicts all objects selected in the scan. Adding and
- * removing an object is O(1), and since freeing a node is also O(1) the overall
- * complexity is O(scanned_objects). So like the free stack which needs to be
- * walked before a scan operation even begins this is linear in the number of
- * objects. It doesn't seem to hurt badly.
- */
-
-/**
- * drm_mm_init_scan - initialize lru scanning
- * @mm: drm_mm to scan
- * @size: size of the allocation
- * @alignment: alignment of the allocation
- * @color: opaque tag value to use for the allocation
- *
- * This simply sets up the scanning routines with the parameters for the desired
- * hole. Note that there's no need to specify allocation flags, since they only
- * change the place a node is allocated from within a suitable hole.
- *
- * Warning:
- * As long as the scan list is non-empty, no other operations than
- * adding/removing nodes to/from the scan list are allowed.
+ * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
+ * reported true) in the scan, and any overlapping nodes after color adjustment
+ * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
+ * since freeing a node is also O(1) the overall complexity is
+ * O(scanned_objects). So, like the free stack which needs to be walked before
+ * a scan operation even begins, this is linear in the number of objects. It
+ * doesn't seem to hurt too badly.
*/
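
[Editor's note: to make the three phases described above concrete, a hedged sketch of an eviction loop over a driver-private LRU. Only the drm_mm_scan_* calls come from this patch; struct foo_obj, its lru/scan_link/node members, foo_unbind(), and the size/alignment/range parameters are hypothetical driver code.]

	struct drm_mm_scan scan;
	struct foo_obj *obj, *next;
	struct drm_mm_node *node;
	LIST_HEAD(eviction_list);
	bool found = false;

	drm_mm_scan_init_with_range(&scan, &mm, size, alignment, 0 /* color */,
				    range_start, range_end, DRM_MM_INSERT_LOW);

	/* Phase 1: enlarge the candidate hole by decoupling LRU objects. */
	list_for_each_entry(obj, &lru, lru) {
		list_add(&obj->scan_link, &eviction_list);
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;	/* a big enough hole now exists */
			break;
		}
	}

	/*
	 * Phase 2: restore the allocator state by removing every scanned block
	 * in reverse order of addition (list_add() prepends, so walking the
	 * eviction list forwards is exactly that order). Keep only the blocks
	 * that actually overlap the target hole.
	 */
	list_for_each_entry_safe(obj, next, &eviction_list, scan_link) {
		if (!drm_mm_scan_remove_block(&scan, &obj->node))
			list_del(&obj->scan_link);	/* keep, do not evict */
	}

	/* Phase 3: evict the overlapping blocks, then any neighbours that
	 * color_adjust forces out of the hole. */
	list_for_each_entry_safe(obj, next, &eviction_list, scan_link) {
		list_del(&obj->scan_link);
		foo_unbind(obj);		/* calls drm_mm_remove_node() */
	}
	if (found)
		while ((node = drm_mm_scan_color_evict(&scan)))
			foo_unbind(container_of(node, struct foo_obj, node));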
-void drm_mm_init_scan(struct drm_mm *mm,
- u64 size,
- unsigned alignment,
- unsigned long color)
-{
- mm->scan_color = color;
- mm->scan_alignment = alignment;
- mm->scan_size = size;
- mm->scanned_blocks = 0;
- mm->scan_hit_start = 0;
- mm->scan_hit_end = 0;
- mm->scan_check_range = 0;
- mm->prev_scanned_node = NULL;
-}
-EXPORT_SYMBOL(drm_mm_init_scan);
/**
- * drm_mm_init_scan - initialize range-restricted lru scanning
+ * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
+ * @scan: scan state
* @mm: drm_mm to scan
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for the allocation
* @start: start of the allowed range for the allocation
* @end: end of the allowed range for the allocation
+ * @mode: fine-tune the allocation search and placement
*
* This simply sets up the scanning routines with the parameters for the desired
- * hole. Note that there's no need to specify allocation flags, since they only
- * change the place a node is allocated from within a suitable hole.
+ * hole.
*
* Warning:
* As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
-void drm_mm_init_scan_with_range(struct drm_mm *mm,
+void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
+ struct drm_mm *mm,
u64 size,
- unsigned alignment,
+ u64 alignment,
unsigned long color,
u64 start,
- u64 end)
+ u64 end,
+ enum drm_mm_insert_mode mode)
{
- mm->scan_color = color;
- mm->scan_alignment = alignment;
- mm->scan_size = size;
- mm->scanned_blocks = 0;
- mm->scan_hit_start = 0;
- mm->scan_hit_end = 0;
- mm->scan_start = start;
- mm->scan_end = end;
- mm->scan_check_range = 1;
- mm->prev_scanned_node = NULL;
+ DRM_MM_BUG_ON(start >= end);
+ DRM_MM_BUG_ON(!size || size > end - start);
+ DRM_MM_BUG_ON(mm->scan_active);
+
+ scan->mm = mm;
+
+ if (alignment <= 1)
+ alignment = 0;
+
+ scan->color = color;
+ scan->alignment = alignment;
+ scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
+ scan->size = size;
+ scan->mode = mode;
+
+ DRM_MM_BUG_ON(end <= start);
+ scan->range_start = start;
+ scan->range_end = end;
+
+ scan->hit_start = U64_MAX;
+ scan->hit_end = 0;
}
-EXPORT_SYMBOL(drm_mm_init_scan_with_range);
+EXPORT_SYMBOL(drm_mm_scan_init_with_range);
/**
* drm_mm_scan_add_block - add a node to the scan list
+ * @scan: the active drm_mm scanner
* @node: drm_mm_node to add
*
* Add a node to the scan list that might be freed to make space for the desired
@@ -790,105 +688,165 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
* Returns:
* True if a hole has been found, false otherwise.
*/
-bool drm_mm_scan_add_block(struct drm_mm_node *node)
+bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
+ struct drm_mm_node *node)
{
- struct drm_mm *mm = node->mm;
- struct drm_mm_node *prev_node;
+ struct drm_mm *mm = scan->mm;
+ struct drm_mm_node *hole;
u64 hole_start, hole_end;
+ u64 col_start, col_end;
u64 adj_start, adj_end;
- mm->scanned_blocks++;
+ DRM_MM_BUG_ON(node->mm != mm);
+ DRM_MM_BUG_ON(!node->allocated);
+ DRM_MM_BUG_ON(node->scanned_block);
+ node->scanned_block = true;
+ mm->scan_active++;
+
+ /* Remove this block from the node_list so that we enlarge the hole
+ * (distance between the end of our previous node and the start of
+ * our next), without poisoning the link so that we can restore it
+ * later in drm_mm_scan_remove_block().
+ */
+ hole = list_prev_entry(node, node_list);
+ DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
+ __list_del_entry(&node->node_list);
+
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = __drm_mm_hole_node_end(hole);
+
+ col_start = hole_start;
+ col_end = hole_end;
+ if (mm->color_adjust)
+ mm->color_adjust(hole, scan->color, &col_start, &col_end);
- BUG_ON(node->scanned_block);
- node->scanned_block = 1;
+ adj_start = max(col_start, scan->range_start);
+ adj_end = min(col_end, scan->range_end);
+ if (adj_end <= adj_start || adj_end - adj_start < scan->size)
+ return false;
- prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
- node_list);
+ if (scan->mode == DRM_MM_INSERT_HIGH)
+ adj_start = adj_end - scan->size;
- node->scanned_preceeds_hole = prev_node->hole_follows;
- prev_node->hole_follows = 1;
- list_del(&node->node_list);
- node->node_list.prev = &prev_node->node_list;
- node->node_list.next = &mm->prev_scanned_node->node_list;
- mm->prev_scanned_node = node;
-
- adj_start = hole_start = drm_mm_hole_node_start(prev_node);
- adj_end = hole_end = drm_mm_hole_node_end(prev_node);
-
- if (mm->scan_check_range) {
- if (adj_start < mm->scan_start)
- adj_start = mm->scan_start;
- if (adj_end > mm->scan_end)
- adj_end = mm->scan_end;
- }
+ if (scan->alignment) {
+ u64 rem;
- if (mm->color_adjust)
- mm->color_adjust(prev_node, mm->scan_color,
- &adj_start, &adj_end);
-
- if (check_free_hole(adj_start, adj_end,
- mm->scan_size, mm->scan_alignment)) {
- mm->scan_hit_start = hole_start;
- mm->scan_hit_end = hole_end;
- return true;
+ if (likely(scan->remainder_mask))
+ rem = adj_start & scan->remainder_mask;
+ else
+ div64_u64_rem(adj_start, scan->alignment, &rem);
+ if (rem) {
+ adj_start -= rem;
+ if (scan->mode != DRM_MM_INSERT_HIGH)
+ adj_start += scan->alignment;
+ if (adj_start < max(col_start, scan->range_start) ||
+ min(col_end, scan->range_end) - adj_start < scan->size)
+ return false;
+
+ if (adj_end <= adj_start ||
+ adj_end - adj_start < scan->size)
+ return false;
+ }
}
- return false;
+ scan->hit_start = adj_start;
+ scan->hit_end = adj_start + scan->size;
+
+ DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
+ DRM_MM_BUG_ON(scan->hit_start < hole_start);
+ DRM_MM_BUG_ON(scan->hit_end > hole_end);
+
+ return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
* drm_mm_scan_remove_block - remove a node from the scan list
+ * @scan: the active drm_mm scanner
* @node: drm_mm_node to remove
*
- * Nodes _must_ be removed in the exact same order from the scan list as they
- * have been added, otherwise the internal state of the memory manager will be
- * corrupted.
+ * Nodes **must** be removed from the scan list in exactly the reverse order in
+ * which they were added (e.g. using list_add() as they are added and then
+ * list_for_each() over that eviction list to remove), otherwise the internal
+ * state of the memory manager will be corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
- * return the just freed block (because its at the top of the free_stack list).
+ * immediately following drm_mm_insert_node_in_range() in DRM_MM_INSERT_EVICT
+ * mode will then return the just freed block (because it's at the top of the
+ * hole_stack list).
*
* Returns:
* True if this block should be evicted, false otherwise. Will always
* return false when no hole has been found.
*/
-bool drm_mm_scan_remove_block(struct drm_mm_node *node)
+bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
+ struct drm_mm_node *node)
{
- struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- mm->scanned_blocks--;
-
- BUG_ON(!node->scanned_block);
- node->scanned_block = 0;
-
- prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
- node_list);
-
- prev_node->hole_follows = node->scanned_preceeds_hole;
+ DRM_MM_BUG_ON(node->mm != scan->mm);
+ DRM_MM_BUG_ON(!node->scanned_block);
+ node->scanned_block = false;
+
+ DRM_MM_BUG_ON(!node->mm->scan_active);
+ node->mm->scan_active--;
+
+ /* During drm_mm_scan_add_block() we decoupled this node leaving
+ * its pointers intact. Now that the caller is walking back along
+ * the eviction list we can restore this block into its rightful
+ * place on the full node_list. To confirm that the caller is walking
+ * backwards correctly we check that prev_node->next == node->next,
+ * i.e. both believe the same node should be on the other side of the
+ * hole.
+ */
+ prev_node = list_prev_entry(node, node_list);
+ DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
+ list_next_entry(node, node_list));
list_add(&node->node_list, &prev_node->node_list);
- return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
- node->start < mm->scan_hit_end);
+ return (node->start + node->size > scan->hit_start &&
+ node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
/**
- * drm_mm_clean - checks whether an allocator is clean
- * @mm: drm_mm allocator to check
+ * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
+ * @scan: drm_mm scan with target hole
+ *
+ * After completing an eviction scan and removing the selected nodes, we may
+ * need to remove a few more nodes from either side of the target hole if
+ * mm.color_adjust is being used.
*
* Returns:
- * True if the allocator is completely free, false if there's still a node
- * allocated in it.
+ * A node to evict, or NULL if there are no overlapping nodes.
*/
-bool drm_mm_clean(struct drm_mm * mm)
+struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
- struct list_head *head = &mm->head_node.node_list;
+ struct drm_mm *mm = scan->mm;
+ struct drm_mm_node *hole;
+ u64 hole_start, hole_end;
+
+ DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
+
+ if (!mm->color_adjust)
+ return NULL;
+
+ hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = hole_start + hole->hole_size;
- return (head->next->next == head);
+ DRM_MM_BUG_ON(hole_start > scan->hit_start);
+ DRM_MM_BUG_ON(hole_end < scan->hit_end);
+
+ mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
+ if (hole_start > scan->hit_start)
+ return hole;
+ if (hole_end < scan->hit_end)
+ return list_next_entry(hole, node_list);
+
+ return NULL;
}
-EXPORT_SYMBOL(drm_mm_clean);
+EXPORT_SYMBOL(drm_mm_scan_color_evict);
/**
* drm_mm_init - initialize a drm-mm allocator
@@ -898,26 +856,26 @@ EXPORT_SYMBOL(drm_mm_clean);
*
* Note that @mm must be cleared to 0 before calling this function.
*/
-void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
+void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
+ DRM_MM_BUG_ON(start + size <= start);
+
+ mm->color_adjust = NULL;
+
INIT_LIST_HEAD(&mm->hole_stack);
- mm->scanned_blocks = 0;
+ mm->interval_tree = RB_ROOT;
+ mm->holes_size = RB_ROOT;
+ mm->holes_addr = RB_ROOT;
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- mm->head_node.allocated = 0;
- mm->head_node.hole_follows = 1;
- mm->head_node.scanned_block = 0;
- mm->head_node.scanned_prev_free = 0;
- mm->head_node.scanned_next_free = 0;
+ mm->head_node.allocated = false;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
- mm->head_node.size = start - mm->head_node.start;
- list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+ mm->head_node.size = -size;
+ add_hole(&mm->head_node);
- mm->interval_tree = RB_ROOT;
-
- mm->color_adjust = NULL;
+ mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);
@@ -930,95 +888,46 @@ EXPORT_SYMBOL(drm_mm_init);
*/
void drm_mm_takedown(struct drm_mm *mm)
{
- if (WARN(!list_empty(&mm->head_node.node_list),
+ if (WARN(!drm_mm_clean(mm),
"Memory manager not clean during takedown.\n"))
show_leaks(mm);
-
}
EXPORT_SYMBOL(drm_mm_takedown);
-static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
- const char *prefix)
+static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
- u64 hole_start, hole_end, hole_size;
-
- if (entry->hole_follows) {
- hole_start = drm_mm_hole_node_start(entry);
- hole_end = drm_mm_hole_node_end(entry);
- hole_size = hole_end - hole_start;
- pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
- hole_end, hole_size);
- return hole_size;
- }
-
- return 0;
-}
-
-/**
- * drm_mm_debug_table - dump allocator state to dmesg
- * @mm: drm_mm allocator to dump
- * @prefix: prefix to use for dumping to dmesg
- */
-void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
-{
- struct drm_mm_node *entry;
- u64 total_used = 0, total_free = 0, total = 0;
+ u64 start, size;
- total_free += drm_mm_debug_hole(&mm->head_node, prefix);
-
- drm_mm_for_each_node(entry, mm) {
- pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
- entry->start + entry->size, entry->size);
- total_used += entry->size;
- total_free += drm_mm_debug_hole(entry, prefix);
+ size = entry->hole_size;
+ if (size) {
+ start = drm_mm_hole_node_start(entry);
+ drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
+ start, start + size, size);
}
- total = total_free + total_used;
- pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
- total_used, total_free);
+ return size;
}
-EXPORT_SYMBOL(drm_mm_debug_table);
-
-#if defined(CONFIG_DEBUG_FS)
-static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
-{
- u64 hole_start, hole_end, hole_size;
-
- if (entry->hole_follows) {
- hole_start = drm_mm_hole_node_start(entry);
- hole_end = drm_mm_hole_node_end(entry);
- hole_size = hole_end - hole_start;
- seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
- hole_end, hole_size);
- return hole_size;
- }
-
- return 0;
-}
-
/**
- * drm_mm_dump_table - dump allocator state to a seq_file
- * @m: seq_file to dump to
- * @mm: drm_mm allocator to dump
+ * drm_mm_print - print allocator state
+ * @mm: drm_mm allocator to print
+ * @p: DRM printer to use
*/
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
- struct drm_mm_node *entry;
+ const struct drm_mm_node *entry;
u64 total_used = 0, total_free = 0, total = 0;
- total_free += drm_mm_dump_hole(m, &mm->head_node);
+ total_free += drm_mm_dump_hole(p, &mm->head_node);
drm_mm_for_each_node(entry, mm) {
- seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
+ drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
entry->start + entry->size, entry->size);
total_used += entry->size;
- total_free += drm_mm_dump_hole(m, entry);
+ total_free += drm_mm_dump_hole(p, entry);
}
total = total_free + total_used;
- seq_printf(m, "total: %llu, used %llu free %llu\n", total,
+ drm_printf(p, "total: %llu, used %llu free %llu\n", total,
total_used, total_free);
- return 0;
}
-EXPORT_SYMBOL(drm_mm_dump_table);
-#endif
+EXPORT_SYMBOL(drm_mm_print);
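
[Editor's note: a minimal usage sketch. It assumes the drm_debug_printer() constructor from the accompanying drm_print work (paired with the __drm_printfn_debug helper added later in this series) is available; the prefix string is arbitrary.]

	struct drm_printer p = drm_debug_printer("drm_mm");

	drm_mm_print(&mm, &p);	/* one line per used node plus each free hole */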
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 2735a5847ffa..884cc4d26fb5 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <drm/drm_encoder.h>
#include <drm/drm_mode_config.h>
#include <drm/drmP.h>
@@ -84,113 +85,74 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_card_res *card_res = data;
- struct list_head *lh;
struct drm_framebuffer *fb;
struct drm_connector *connector;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
- int ret = 0;
- int connector_count = 0;
- int crtc_count = 0;
- int fb_count = 0;
- int encoder_count = 0;
- int copied = 0;
+ int count, ret = 0;
uint32_t __user *fb_id;
uint32_t __user *crtc_id;
uint32_t __user *connector_id;
uint32_t __user *encoder_id;
+ struct drm_connector_list_iter conn_iter;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&file_priv->fbs_lock);
- /*
- * For the non-control nodes we need to limit the list of resources
- * by IDs in the group list for this node
- */
- list_for_each(lh, &file_priv->fbs)
- fb_count++;
-
- /* handle this in 4 parts */
- /* FBs */
- if (card_res->count_fbs >= fb_count) {
- copied = 0;
- fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, filp_head) {
- if (put_user(fb->base.id, fb_id + copied)) {
- mutex_unlock(&file_priv->fbs_lock);
- return -EFAULT;
- }
- copied++;
+ count = 0;
+ fb_id = u64_to_user_ptr(card_res->fb_id_ptr);
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+ if (count < card_res->count_fbs &&
+ put_user(fb->base.id, fb_id + count)) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -EFAULT;
}
+ count++;
}
- card_res->count_fbs = fb_count;
+ card_res->count_fbs = count;
mutex_unlock(&file_priv->fbs_lock);
- /* mode_config.mutex protects the connector list against e.g. DP MST
- * connector hot-adding. CRTC/Plane lists are invariant. */
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_crtc(crtc, dev)
- crtc_count++;
-
- drm_for_each_connector(connector, dev)
- connector_count++;
-
- drm_for_each_encoder(encoder, dev)
- encoder_count++;
-
card_res->max_height = dev->mode_config.max_height;
card_res->min_height = dev->mode_config.min_height;
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
- /* CRTCs */
- if (card_res->count_crtcs >= crtc_count) {
- copied = 0;
- crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
- drm_for_each_crtc(crtc, dev) {
- if (put_user(crtc->base.id, crtc_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
+ count = 0;
+ crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr);
+ drm_for_each_crtc(crtc, dev) {
+ if (count < card_res->count_crtcs &&
+ put_user(crtc->base.id, crtc_id + count))
+ return -EFAULT;
+ count++;
}
- card_res->count_crtcs = crtc_count;
-
- /* Encoders */
- if (card_res->count_encoders >= encoder_count) {
- copied = 0;
- encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
- drm_for_each_encoder(encoder, dev) {
- if (put_user(encoder->base.id, encoder_id +
- copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
+ card_res->count_crtcs = count;
+
+ count = 0;
+ encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr);
+ drm_for_each_encoder(encoder, dev) {
+ if (count < card_res->count_encoders &&
+ put_user(encoder->base.id, encoder_id + count))
+ return -EFAULT;
+ count++;
}
- card_res->count_encoders = encoder_count;
-
- /* Connectors */
- if (card_res->count_connectors >= connector_count) {
- copied = 0;
- connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
- drm_for_each_connector(connector, dev) {
- if (put_user(connector->base.id,
- connector_id + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ card_res->count_encoders = count;
+
+ drm_connector_list_iter_get(dev, &conn_iter);
+ count = 0;
+ connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (count < card_res->count_connectors &&
+ put_user(connector->base.id, connector_id + count)) {
+ drm_connector_list_iter_put(&conn_iter);
+ return -EFAULT;
}
+ count++;
}
- card_res->count_connectors = connector_count;
+ card_res->count_connectors = count;
+ drm_connector_list_iter_put(&conn_iter);
-out:
- mutex_unlock(&dev->mode_config.mutex);
return ret;
}
@@ -208,6 +170,7 @@ void drm_mode_config_reset(struct drm_device *dev)
struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
@@ -221,11 +184,11 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- mutex_lock(&dev->mode_config.mutex);
- drm_for_each_connector(connector, dev)
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
if (connector->funcs->reset)
connector->funcs->reset(connector);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_connector_list_iter_put(&conn_iter);
}
EXPORT_SYMBOL(drm_mode_config_reset);
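
[Editor's note: the iterator pattern used in the hunks above generalizes to any connector walk; a minimal sketch, with dev being some &drm_device the caller already holds.]

	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_get(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/*
		 * Safe against concurrent hotplug: the iterator holds a full
		 * reference on the current connector, so no mode_config.mutex
		 * is needed just to walk the list.
		 */
	}
	drm_connector_list_iter_put(&conn_iter);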
@@ -406,10 +369,9 @@ void drm_mode_config_init(struct drm_device *dev)
idr_init(&dev->mode_config.crtc_idr);
idr_init(&dev->mode_config.tile_idr);
ida_init(&dev->mode_config.connector_ida);
+ spin_lock_init(&dev->mode_config.connector_list_lock);
- drm_modeset_lock_all(dev);
drm_mode_create_standard_properties(dev);
- drm_modeset_unlock_all(dev);
/* Just to be sure */
dev->mode_config.num_fb = 0;
@@ -436,7 +398,8 @@ EXPORT_SYMBOL(drm_mode_config_init);
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
- struct drm_connector *connector, *ot;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
@@ -449,9 +412,20 @@ void drm_mode_config_cleanup(struct drm_device *dev)
encoder->funcs->destroy(encoder);
}
- list_for_each_entry_safe(connector, ot,
- &dev->mode_config.connector_list, head) {
- connector->funcs->destroy(connector);
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ /* drm_connector_list_iter holds a full reference to the
+ * current connector itself, which means it is inherently safe
+ * against unreferencing the current connector - but not against
+ * deleting it right away. */
+ drm_connector_unreference(connector);
+ }
+ drm_connector_list_iter_put(&conn_iter);
+ if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
+ DRM_ERROR("connector %s leaked!\n", connector->name);
+ drm_connector_list_iter_put(&conn_iter);
}
list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index c6885a4911c0..220a6c1f4ab9 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -23,6 +23,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_atomic.h>
#include "drm_crtc_internal.h"
@@ -273,7 +274,7 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
* their value in obj->properties->values[].. mostly to avoid
* having to deal w/ EDID and similar props in atomic paths:
*/
- if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
+ if (drm_drv_uses_atomic_modeset(property->dev) &&
!(property->flags & DRM_MODE_PROP_IMMUTABLE))
return drm_atomic_get_property(obj, property, val);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index e6b19bc9021a..fd22c1c891bf 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -797,6 +797,26 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_vrefresh);
/**
+ * drm_mode_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+ * @mode: mode to query
+ * @hdisplay: hdisplay value to fill in
+ * @vdisplay: vdisplay value to fill in
+ *
+ * The vdisplay value will be doubled if the specified mode is a stereo mode of
+ * the appropriate layout.
+ */
+void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay)
+{
+ struct drm_display_mode adjusted = *mode;
+
+ drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+ *hdisplay = adjusted.crtc_hdisplay;
+ *vdisplay = adjusted.crtc_vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_get_hv_timing);
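
[Editor's note: a short sketch of the new helper, with mode being any &drm_display_mode a driver is about to program; callers no longer need to special-case stereo layouts themselves.]

	int hdisplay, vdisplay;

	drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
	/* for a frame-packed stereo mode, vdisplay comes back doubled */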
+
+/**
* drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
* @p: mode
* @adjust_flags: a combination of adjustment flags
@@ -1461,12 +1481,8 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
mode->type |= DRM_MODE_TYPE_USERDEF;
/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
- if (cmd->xres == 1366 && mode->hdisplay == 1368) {
- mode->hdisplay = 1366;
- mode->hsync_start--;
- mode->hsync_end--;
- drm_mode_set_name(mode);
- }
+ if (cmd->xres == 1366)
+ drm_mode_fixup_1366x768(mode);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index cc232ac6c950..cc44a9a4b004 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -48,6 +48,7 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
INIT_LIST_HEAD(&panel_list);
+ spin_lock_irq(&dev->mode_config.connector_list_lock);
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
@@ -57,38 +58,27 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
}
list_splice(&panel_list, &dev->mode_config.connector_list);
+ spin_unlock_irq(&dev->mode_config.connector_list_lock);
}
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
/**
* drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
+ * @dev: DRM device
* @fb: drm_framebuffer object to fill out
* @mode_cmd: metadata from the userspace fb creation request
*
* This helper can be used in a drivers fb_create callback to pre-fill the fb's
* metadata fields.
*/
-void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
+ struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
int i;
- info = drm_format_info(mode_cmd->pixel_format);
- if (!info || !info->depth) {
- struct drm_format_name_buf format_name;
-
- DRM_DEBUG_KMS("non-RGB pixel format %s\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name));
-
- fb->depth = 0;
- fb->bits_per_pixel = 0;
- } else {
- fb->depth = info->depth;
- fb->bits_per_pixel = info->cpp[0] * 8;
- }
-
+ fb->dev = dev;
+ fb->format = drm_format_info(mode_cmd->pixel_format);
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
@@ -96,7 +86,6 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
fb->offsets[i] = mode_cmd->offsets[i];
}
fb->modifier = mode_cmd->modifier[0];
- fb->pixel_format = mode_cmd->pixel_format;
fb->flags = mode_cmd->flags;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
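
[Editor's note: a hedged sketch of the call site this helper targets, a driver's fb_create path; struct foo_fb (embedding a &drm_framebuffer as its base member) and foo_fb_funcs are hypothetical driver code.]

	static int foo_framebuffer_init(struct drm_device *dev,
					struct foo_fb *fb,
					const struct drm_mode_fb_cmd2 *mode_cmd)
	{
		/* pre-fills fb->base.format, width, height, pitches, offsets */
		drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);

		return drm_framebuffer_init(dev, &fb->base, &foo_fb_funcs);
	}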
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 3551ae31f143..bf60f2645e55 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -33,7 +33,7 @@
* to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
* the locking is more distributed around the driver code, we want a bit
* of extra utility/tracking out of our acquire-ctx. This is provided
- * by drm_modeset_lock / drm_modeset_acquire_ctx.
+ * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
*
* For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
*
@@ -53,7 +53,7 @@
* drm_modeset_acquire_fini(&ctx);
*
* On top of of these per-object locks using &ww_mutex there's also an overall
- * dev->mode_config.lock, for protecting everything else. Mostly this means
+ * &drm_mode_config.mutex, for protecting everything else. Mostly this means
* probe state of connectors, and preventing hotplug add/removal of connectors.
*
* Finally there's a bunch of dedicated locks to protect drm core internal
@@ -71,7 +71,7 @@ static DEFINE_WW_CLASS(crtc_ww_class);
* drm_modeset_unlock_all() function.
*
* This function is deprecated. It allocates a lock acquisition context and
- * stores it in the DRM device's ->mode_config. This facilitate conversion of
+ * stores it in &drm_device.mode_config. This facilitates conversion of
* existing code because it removes the need to manually deal with the
* acquisition context, but it is also brittle because the context is global
* and care must be taken not to nest calls. New code should use the
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(drm_modeset_lock_all);
* drm_modeset_lock_all() function.
*
* This function is deprecated. It uses the lock acquisition context stored
- * in the DRM device's ->mode_config. This facilitates conversion of existing
+ * in &drm_device.mode_config. This facilitates conversion of existing
* code because it removes the need to manually deal with the acquisition
* context, but it is also brittle because the context is global and care must
* be taken not to nest calls. New code should pass the acquisition context
@@ -468,7 +468,7 @@ EXPORT_SYMBOL(drm_modeset_unlock);
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented.
*
- * Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
+ * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
* since that lock isn't required for modeset state changes. Callers which
* need to grab that lock too need to do so outside of the acquire context
* @ctx.
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 47848ed8ca48..b5f2f0fece99 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -4,6 +4,7 @@
#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
static void drm_release_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 3dfe3c886502..308d442a531b 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(drm_panel_detach);
* Return: A pointer to the panel registered for the specified device tree
* node or NULL if no panel matching the device tree node can be found.
*/
-struct drm_panel *of_drm_find_panel(struct device_node *np)
+struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
struct drm_panel *panel;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 3ceea9cb9d3e..a3b356e70b35 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -191,7 +191,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
- if (drm_pci_device_is_agp(dev))
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
dev->agp = drm_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
@@ -223,7 +223,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
* Try and register, if we fail to register, backout previous work.
*
* NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your ->load() callback.
+ * drm_dev_register() instead and remove your &drm_driver.load callback.
*
* Return: 0 on success or a negative error code on failure.
*/
@@ -257,10 +257,6 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (ret)
goto err_agp;
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, pci_name(pdev), dev->primary->index);
-
/* No locking needed since shadow-attach is single-threaded since it may
* only be called from the per-driver module init hook. */
if (drm_core_check_feature(dev, DRIVER_LEGACY))
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 62b98f386fd1..c464fc4a874d 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -37,12 +37,12 @@
* rotation or Z-position. All these properties are stored in &drm_plane_state.
*
* To create a plane, a KMS drivers allocates and zeroes an instances of
- * struct &drm_plane (possibly as part of a larger structure) and registers it
+ * &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
*
* Cursor and overlay planes are optional. All drivers should provide one
* primary plane per CRTC to avoid surprising userspace too much. See enum
- * &drm_plane_type for a more in-depth discussion of these special uapi-relevant
+ * drm_plane_type for a more in-depth discussion of these special uapi-relevant
* plane types. Special planes are associated with their CRTC by calling
* drm_crtc_init_with_planes().
*
@@ -254,7 +254,7 @@ EXPORT_SYMBOL(drm_plane_cleanup);
* @idx: index of registered plane to find for
*
* Given a plane index, return the registered plane from DRM device's
- * list of planes with matching index.
+ * list of planes with matching index. This is the inverse of drm_plane_index().
*/
struct drm_plane *
drm_plane_from_index(struct drm_device *dev, int idx)
@@ -392,12 +392,16 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
return -ENOENT;
drm_modeset_lock(&plane->mutex, NULL);
- if (plane->crtc)
+ if (plane->state && plane->state->crtc)
+ plane_resp->crtc_id = plane->state->crtc->base.id;
+ else if (!plane->state && plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
else
plane_resp->crtc_id = 0;
- if (plane->fb)
+ if (plane->state && plane->state->fb)
+ plane_resp->fb_id = plane->state->fb->base.id;
+ else if (!plane->state && plane->fb)
plane_resp->fb_id = plane->fb->base.id;
else
plane_resp->fb_id = 0;
@@ -478,11 +482,11 @@ static int __setplane_internal(struct drm_plane *plane,
}
/* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
+ ret = drm_plane_check_pixel_format(plane, fb->format->format);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(fb->pixel_format,
+ drm_get_format_name(fb->format->format,
&format_name));
goto out;
}
@@ -854,7 +858,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (ret)
goto out;
- if (crtc->primary->fb->pixel_format != fb->pixel_format) {
+ if (crtc->primary->fb->format != fb->format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
goto out;
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7a7dddf604d7..148688fb920a 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -29,6 +29,7 @@
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_atomic_helper.h>
#define SUBPIXEL_MASK 0xffff
@@ -38,9 +39,9 @@
*
* This helper library has two parts. The first part has support to implement
* primary plane support on top of the normal CRTC configuration interface.
- * Since the legacy ->set_config interface ties the primary plane together with
- * the CRTC state this does not allow userspace to disable the primary plane
- * itself. To avoid too much duplicated code use
+ * Since the legacy &drm_mode_config_funcs.set_config interface ties the primary
+ * plane together with the CRTC state this does not allow userspace to disable
+ * the primary plane itself. To avoid too much duplicated code use
* drm_plane_helper_check_update() which can be used to enforce the same
 * restrictions as primary planes had thus far. The default primary plane only
 * exposes XRGB8888 and ARGB8888 as valid pixel formats for the attached
@@ -59,7 +60,7 @@
* Again drivers are strongly urged to switch to the new interfaces.
*
* The plane helpers share the function table structures with other helpers,
- * specifically also the atomic helpers. See struct &drm_plane_helper_funcs for
+ * specifically also the atomic helpers. See &struct drm_plane_helper_funcs for
* the details.
*/
@@ -74,6 +75,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int count = 0;
/*
@@ -83,7 +85,8 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
*/
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
*(connector_list++) = connector;
@@ -91,6 +94,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
count++;
}
}
+ drm_connector_list_iter_put(&conn_iter);
return count;
}
@@ -380,7 +384,8 @@ EXPORT_SYMBOL(drm_primary_helper_update);
* is called in response to a userspace SetPlane operation on the plane with a
* NULL framebuffer parameter. It unconditionally fails the disable call with
 * -EINVAL; the only way to disable the primary plane without driver support is
- * to disable the entier CRTC. Which does not match the plane ->disable hook.
+ * to disable the entire CRTC. Which does not match the plane
+ * &drm_plane_funcs.disable_plane hook.
*
* Note that some hardware may be able to disable the primary plane without
* disabling the whole CRTC. Drivers for such hardware should provide their
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 026269851ce9..56d2f93ed6b9 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -57,10 +57,6 @@ static int drm_get_platform_dev(struct platform_device *platdev,
if (ret)
goto err_free;
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, dev->primary->index);
-
return 0;
err_free:
@@ -78,7 +74,7 @@ err_free:
* .load() function.
*
* NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your ->load() callback.
+ * drm_dev_register() instead and remove your &drm_driver.load callback.
*
* Return: 0 on success or a negative error code on failure.
*/
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 8d77b2462594..25aa4558f1b5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -40,8 +40,11 @@
* On the export the dma_buf holds a reference to the exporting GEM
* object. It takes this reference in handle_to_fd_ioctl, when it
* first calls .prime_export and stores the exporting GEM object in
- * the dma_buf priv. This reference is released when the dma_buf
- * object goes away in the driver .release function.
+ * the dma_buf priv. This reference needs to be released when the
+ * final reference to the &dma_buf itself is dropped and its
+ * &dma_buf_ops.release function is called. For GEM-based drivers,
+ * the dma_buf should be exported using drm_gem_dmabuf_export() and
+ * then released by drm_gem_dmabuf_release().
*
* On the import the importing GEM object holds a reference to the
* dma_buf (which in turn holds a ref to the exporting GEM object).
@@ -51,6 +54,16 @@
* when the imported object is destroyed, we remove the attachment
* and drop the reference to the dma_buf.
*
+ * When all the references to the &dma_buf are dropped, i.e. when
+ * userspace has closed both handles to the imported GEM object (through the
+ * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported dma_buf
+ * (through the HANDLE_TO_FD IOCTL), and all kernel-internal references
+ * are also gone, then the dma_buf gets destroyed. This can also happen as a
+ * part of the clean up procedure in the drm_release() function if userspace
+ * fails to properly clean up. Note that both the kernel and userspace (by
+ * keeping the PRIME file descriptors open) can hold references to a
+ * &dma_buf.
+ *
* Thus the chain of references always flows in one direction
* (avoiding loops): importing_gem -> dmabuf -> exporting_gem
*
@@ -291,7 +304,7 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
* This wraps dma_buf_export() for use by generic GEM drivers that are using
* drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
* a reference to the &drm_device and the exported &drm_gem_object (stored in
- * exp_info->priv) which is released by drm_gem_dmabuf_release().
+ * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
*
* Returns the new dmabuf.
*/
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index ad3caaa1f48b..02a107d50706 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -40,6 +40,12 @@ void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
}
EXPORT_SYMBOL(__drm_printfn_info);
+void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
+{
+ pr_debug("%s %pV", p->prefix, vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_debug);
+
/**
* drm_printf - print to a &drm_printer stream
* @p: the &drm_printer
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index cf8f0128c161..93381454bdf7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -43,7 +43,7 @@
* DOC: output probing helper overview
*
* This library provides some helper code for output probing. It provides an
- * implementation of the core connector->fill_modes interface with
+ * implementation of the core &drm_connector_funcs.fill_modes interface with
* drm_helper_probe_single_connector_modes.
*
* It also provides support for polling connectors with a work item and for
@@ -55,7 +55,7 @@
* handling code to avoid probing unrelated outputs.
*
* The probe helpers share the function table structures with other display
- * helper libraries. See struct &drm_connector_helper_funcs for the details.
+ * helper libraries. See &struct drm_connector_helper_funcs for the details.
*/
static bool drm_kms_helper_poll = true;
@@ -115,32 +115,38 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
/**
- * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * drm_kms_helper_poll_enable - re-enable output polling.
* @dev: drm_device
*
- * This function re-enables the output polling work without
- * locking the mode_config mutex.
+ * This function re-enables the output polling work, after it has been
+ * temporarily disabled using drm_kms_helper_poll_disable(), for example over
+ * suspend/resume.
+ *
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
*
- * This is like drm_kms_helper_poll_enable() however it is to be
- * called from a context where the mode_config mutex is locked
- * already.
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
*/
-void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
+void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
return;
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
+ drm_connector_list_iter_put(&conn_iter);
if (dev->mode_config.delayed_event) {
/*
@@ -160,7 +166,7 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
}
-EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
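
[Editor's note: a sketch of the strict enable/disable ordering the kernel-doc above asks for, in hypothetical driver suspend/resume hooks; only the two drm_kms_helper_poll_* calls come from this patch.]

	static int foo_pm_suspend(struct drm_device *dev)
	{
		drm_kms_helper_poll_disable(dev);
		/* ... save state and power the hardware down ... */
		return 0;
	}

	static int foo_pm_resume(struct drm_device *dev)
	{
		/* ... power the hardware up and restore state ... */
		drm_kms_helper_poll_enable(dev);
		return 0;
	}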
static enum drm_connector_status
drm_connector_detect(struct drm_connector *connector, bool force)
@@ -181,9 +187,9 @@ drm_connector_detect(struct drm_connector *connector, bool force)
* be added to the connector's probed_modes list, then culled (based on validity
* and the @maxX, @maxY parameters) and put into the normal modes list.
*
- * Intended to be used as a generic implementation of the ->fill_modes()
- * @connector vfunc for drivers that use the CRTC helpers for output mode
- * filtering and detection.
+ * Intended to be used as a generic implementation of the
+ * &drm_connector_funcs.fill_modes() vfunc for drivers that use the CRTC helpers
+ * for output mode filtering and detection.
*
* The basic procedure is as follows
*
@@ -195,7 +201,7 @@ drm_connector_detect(struct drm_connector *connector, bool force)
*
* - debugfs 'override_edid' (used for testing only)
* - firmware EDID (drm_load_edid_firmware())
- * - connector helper ->get_modes() vfunc
+ * - &drm_connector_helper_funcs.get_modes vfunc
* - if the connector status is connector_status_connected, standard
* VESA DMT modes up to 1024x768 are automatically added
* (drm_add_modes_noedid())
@@ -216,8 +222,8 @@ drm_connector_detect(struct drm_connector *connector, bool force)
* (if specified)
* - drm_mode_validate_flag() checks the modes against basic connector
* capabilities (interlace_allowed, doublescan_allowed, stereo_allowed)
- * - the optional connector ->mode_valid() helper can perform driver and/or
- * hardware specific checks
+ * - the optional &drm_connector_helper_funcs.mode_valid helper can perform
+ * driver and/or hardware specific checks
*
* 5. Any mode whose status is not OK is pruned from the connector's modes list,
* accompanied by a debug message indicating the reason for the mode's
@@ -287,7 +293,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
- drm_kms_helper_poll_enable_locked(dev);
+ drm_kms_helper_poll_enable(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
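As the kernel-doc above says, drivers adopt this probing logic by pointing their fill_modes vfunc at the helper; a sketch of the wiring (other connector funcs elided):

	#include <drm/drm_crtc_helper.h>

	/* Sketch: the probe helper as the generic fill_modes implementation. */
	static const struct drm_connector_funcs example_connector_funcs = {
		.fill_modes = drm_helper_probe_single_connector_modes,
		/* .detect, .destroy, .reset, ... elided */
	};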
@@ -392,6 +398,7 @@ static void output_poll_execute(struct work_struct *work)
struct delayed_work *delayed_work = to_delayed_work(work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
enum drm_connector_status old_status;
bool repoll = false, changed;
@@ -407,8 +414,8 @@ static void output_poll_execute(struct work_struct *work)
goto out;
}
- drm_for_each_connector(connector, dev) {
-
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
/* Ignore forced connectors. */
if (connector->force)
continue;
@@ -461,6 +468,7 @@ static void output_poll_execute(struct work_struct *work)
changed = true;
}
}
+ drm_connector_list_iter_put(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
@@ -479,8 +487,12 @@ out:
* This function disables the output polling work.
*
* Drivers can call this helper from their device suspend implementation. It is
- * not an error to call this even when output polling isn't enabled or arlready
- * disabled.
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
+ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
@@ -491,24 +503,6 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
/**
- * drm_kms_helper_poll_enable - re-enable output polling.
- * @dev: drm_device
- *
- * This function re-enables the output polling work.
- *
- * Drivers can call this helper from their device resume implementation. It is
- * an error to call this when the output polling support has not yet been set
- * up.
- */
-void drm_kms_helper_poll_enable(struct drm_device *dev)
-{
- mutex_lock(&dev->mode_config.mutex);
- drm_kms_helper_poll_enable_locked(dev);
- mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_kms_helper_poll_enable);
-
-/**
* drm_kms_helper_poll_init - initialize and enable output polling
* @dev: drm_device
*
@@ -572,6 +566,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
enum drm_connector_status old_status;
bool changed = false;
@@ -579,8 +574,8 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
return false;
mutex_lock(&dev->mode_config.mutex);
- drm_for_each_connector(connector, dev) {
-
+ drm_connector_list_iter_get(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
@@ -596,7 +591,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
if (old_status != connector->status)
changed = true;
}
-
+ drm_connector_list_iter_put(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
if (changed)
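The iterator conversions in these hunks follow one pattern throughout; a self-contained sketch of it (function name hypothetical):

	#include <drm/drmP.h>
	#include <drm/drm_connector.h>

	/* Sketch: walking connectors with the reference-counted iterator. */
	static unsigned int example_count_hpd(struct drm_device *dev)
	{
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;
		unsigned int n = 0;

		drm_connector_list_iter_get(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter)
			if (connector->polled & DRM_CONNECTOR_POLL_HPD)
				n++;
		drm_connector_list_iter_put(&conn_iter);

		return n;
	}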
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 24be69d29964..7fc070f3e49e 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -34,7 +34,7 @@
* even the only way to transport metadata about the desired new modeset
* configuration from userspace to the kernel. Properties have a well-defined
* value range, which is enforced by the drm core. See the documentation of the
- * flags member of struct &drm_property for an overview of the different
+ * flags member of &struct drm_property for an overview of the different
* property types and ranges.
*
* Properties don't store the current value directly, but need to be
@@ -42,8 +42,8 @@
* drm_object_attach_property().
*
* Property values are only 64bit. To support bigger piles of data (like gamma
- * tables, color correction matrizes or large structures) a property can instead
- * point at a &drm_property_blob with that additional data
+ * tables, color correction matrices or large structures) a property can instead
+ * point at a &drm_property_blob with that additional data.
*
* Properties are defined by their symbolic name, userspace must keep a
* per-object mapping from those names to the property ID used in the atomic
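For data beyond the 64bit value range described above, a hedged sketch of wrapping a table in a blob property (drm_property_create_blob() copies the buffer; names hypothetical):

	#include <drm/drm_property.h>

	/* Sketch: a lookup table carried by a blob instead of a raw value. */
	static struct drm_property_blob *
	example_create_lut_blob(struct drm_device *dev, const void *lut,
				size_t size)
	{
		return drm_property_create_blob(dev, size, lut);
	}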
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index e6057d8cdcd5..bc5575960ebc 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -371,10 +371,10 @@ EXPORT_SYMBOL(drm_rect_rotate);
* to the vertical axis of the original untransformed
* coordinate space, so that you never have to flip
* them when doing a rotation and its inverse.
- * That is, if you do:
+ * That is, if you do ::
*
- * drm_rotate(&r, width, height, rotation);
- * drm_rotate_inv(&r, width, height, rotation);
+ * drm_rotate(&r, width, height, rotation);
+ * drm_rotate_inv(&r, width, height, rotation);
*
* you will always get back the original rectangle.
*/
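The round-trip property documented above, as a compilable sketch (the exported helpers are drm_rect_rotate()/drm_rect_rotate_inv()):

	#include <drm/drm_rect.h>

	/* Sketch: rotate, work in the rotated space, then undo the rotation. */
	static void example_round_trip(struct drm_rect *r, int width, int height,
				       unsigned int rotation)
	{
		drm_rect_rotate(r, width, height, rotation);
		/* ... operate in the rotated coordinate space ... */
		drm_rect_rotate_inv(r, width, height, rotation);
		/* *r is back in the untransformed coordinate space */
	}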
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 7bae08c2bf0a..35c5d99296b9 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -23,7 +23,7 @@
*
* drm_simple_display_pipe_init() initializes a simple display pipeline
* which has only one full-screen scanout buffer feeding one output. The
- * pipeline is represented by struct &drm_simple_display_pipe and binds
+ * pipeline is represented by &struct drm_simple_display_pipe and binds
* together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed
* entity. Some flexibility for code reuse is provided through a separately
* allocated &drm_connector object and supporting optional &drm_bridge
@@ -182,30 +182,11 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
struct drm_bridge *bridge)
{
- bridge->encoder = &pipe->encoder;
- pipe->encoder.bridge = bridge;
- return drm_bridge_attach(pipe->encoder.dev, bridge);
+ return drm_bridge_attach(&pipe->encoder, bridge, NULL);
}
EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
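The hunk above reflects the reworked drm_bridge_attach() signature, where the last argument names the previous bridge in the chain (NULL when attaching directly to the encoder). A hedged chaining sketch:

	#include <drm/drm_bridge.h>

	/* Sketch: attaching a two-bridge chain behind one encoder. */
	static int example_attach_bridges(struct drm_encoder *encoder,
					  struct drm_bridge *a,
					  struct drm_bridge *b)
	{
		int ret;

		ret = drm_bridge_attach(encoder, a, NULL);
		if (ret)
			return ret;

		return drm_bridge_attach(encoder, b, a);
	}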
/**
- * drm_simple_display_pipe_detach_bridge - Detach the bridge from the display pipe
- * @pipe: simple display pipe object
- *
- * Detaches the drm bridge previously attached with
- * drm_simple_display_pipe_attach_bridge()
- */
-void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe)
-{
- if (WARN_ON(!pipe->encoder.bridge))
- return;
-
- drm_bridge_detach(pipe->encoder.bridge);
- pipe->encoder.bridge = NULL;
-}
-EXPORT_SYMBOL(drm_simple_display_pipe_detach_bridge);
-
-/**
* drm_simple_display_pipe_init - Initialize a simple display pipeline
* @dev: DRM device
* @pipe: simple display pipe object to initialize
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 9a37196c1bf1..513288b5c2f6 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -255,7 +255,7 @@ static const struct attribute_group *connector_dev_groups[] = {
* @connector: connector to add
*
* Create a connector device in sysfs, along with its associated connector
- * properties (so far, connection status, dpms, mode list & edid) and
+ * properties (so far, connection status, dpms, mode list and edid) and
* generate a hotplug event so userspace knows there's a new connector
* available.
*/
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 20cc33d1bfc1..d9100b565198 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -212,8 +212,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
goto out_unlock;
}
- ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
- pages, 0, DRM_MM_SEARCH_DEFAULT);
+ ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
if (ret)
goto out_unlock;
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 2cde7a5442fb..cc1731c5289c 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -2,7 +2,8 @@
config DRM_ETNAVIV
tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
depends on DRM
- depends on ARCH_MXC || ARCH_DOVE
+ depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
+ depends on MMU
select SHMEM
select TMPFS
select IOMMU_API
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index 1086e9876f91..4f76c992043f 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -1,6 +1,7 @@
etnaviv-y := \
etnaviv_buffer.o \
etnaviv_cmd_parser.o \
+ etnaviv_cmdbuf.o \
etnaviv_drv.o \
etnaviv_dump.o \
etnaviv_gem_prime.o \
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index d9230132dfbc..ed9588f36bc9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
@@ -125,7 +126,7 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
u32 *ptr = buf->vaddr + off;
dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
- ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off, size - len * 4 - off);
+ ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
ptr, len * 4, 0);
@@ -158,7 +159,7 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
buffer->user_size = 0;
- return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
+ return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
}
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@@ -169,7 +170,7 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
buffer->user_size = 0;
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+ CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
buffer->user_size - 4);
return buffer->user_size / 8;
@@ -261,7 +262,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
- link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
+ link_target = etnaviv_cmdbuf_get_va(cmdbuf);
link_dwords = cmdbuf->size / 8;
/*
@@ -355,12 +356,13 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+ CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
buffer->user_size - 4);
if (drm_debug & DRM_UT_DRIVER)
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
- return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf), cmdbuf->vaddr);
+ return_target, etnaviv_cmdbuf_get_va(cmdbuf),
+ cmdbuf->vaddr);
if (drm_debug & DRM_UT_DRIVER) {
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
index 2a2e5e366ab7..6e3bbcf24160 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -56,6 +56,8 @@ static const struct {
ST(0x0644, 1),
ST(0x064c, 1),
ST(0x0680, 8),
+ ST(0x086c, 1),
+ ST(0x1028, 1),
ST(0x1410, 1),
ST(0x1430, 1),
ST(0x1458, 1),
@@ -73,8 +75,12 @@ static const struct {
ST(0x16c0, 8),
ST(0x16e0, 8),
ST(0x1740, 8),
+ ST(0x17c0, 8),
+ ST(0x17e0, 8),
ST(0x2400, 14 * 16),
ST(0x10800, 32 * 16),
+ ST(0x14600, 16),
+ ST(0x14800, 8 * 8),
#undef ST
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
new file mode 100644
index 000000000000..633e0f07cbac
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drm_mm.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+#define SUBALLOC_SIZE SZ_256K
+#define SUBALLOC_GRANULE SZ_4K
+#define SUBALLOC_GRANULES (SUBALLOC_SIZE / SUBALLOC_GRANULE)
+
+struct etnaviv_cmdbuf_suballoc {
+ /* suballocated dma buffer properties */
+ struct etnaviv_gpu *gpu;
+ void *vaddr;
+ dma_addr_t paddr;
+
+ /* GPU mapping */
+ u32 iova;
+ struct drm_mm_node vram_node; /* only used on MMUv2 */
+
+ /* allocation management */
+ struct mutex lock;
+ DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
+ int free_space;
+ wait_queue_head_t free_event;
+};
+
+struct etnaviv_cmdbuf_suballoc *
+etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_cmdbuf_suballoc *suballoc;
+ int ret;
+
+ suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
+ if (!suballoc)
+ return ERR_PTR(-ENOMEM);
+
+ suballoc->gpu = gpu;
+ mutex_init(&suballoc->lock);
+ init_waitqueue_head(&suballoc->free_event);
+
+ suballoc->vaddr = dma_alloc_wc(gpu->dev, SUBALLOC_SIZE,
+ &suballoc->paddr, GFP_KERNEL);
+ if (!suballoc->vaddr) {
+ ret = -ENOMEM;
+ goto free_suballoc;
+ }
+
+ ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr,
+ &suballoc->vram_node, SUBALLOC_SIZE,
+ &suballoc->iova);
+ if (ret)
+ goto free_dma;
+
+ return suballoc;
+
+free_dma:
+ dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr);
+free_suballoc:
+ kfree(suballoc);
+
+ return ERR_PTR(ret);
+}
+
+void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
+{
+ etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node,
+ SUBALLOC_SIZE, suballoc->iova);
+ dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr,
+ suballoc->paddr);
+ kfree(suballoc);
+}
+
+struct etnaviv_cmdbuf *
+etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
+ size_t nr_bos)
+{
+ struct etnaviv_cmdbuf *cmdbuf;
+ size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
+ sizeof(*cmdbuf));
+ int granule_offs, order, ret;
+
+ cmdbuf = kzalloc(sz, GFP_KERNEL);
+ if (!cmdbuf)
+ return NULL;
+
+ cmdbuf->suballoc = suballoc;
+ cmdbuf->size = size;
+
+ order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
+retry:
+ mutex_lock(&suballoc->lock);
+ granule_offs = bitmap_find_free_region(suballoc->granule_map,
+ SUBALLOC_GRANULES, order);
+ if (granule_offs < 0) {
+ suballoc->free_space = 0;
+ mutex_unlock(&suballoc->lock);
+ ret = wait_event_interruptible_timeout(suballoc->free_event,
+ suballoc->free_space,
+ msecs_to_jiffies(10 * 1000));
+ if (!ret) {
+ dev_err(suballoc->gpu->dev,
+ "Timeout waiting for cmdbuf space\n");
+ return NULL;
+ }
+ goto retry;
+ }
+ mutex_unlock(&suballoc->lock);
+ cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
+ cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
+
+ return cmdbuf;
+}
+
+void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
+{
+ struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
+ int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
+ SUBALLOC_GRANULE);
+
+ mutex_lock(&suballoc->lock);
+ bitmap_release_region(suballoc->granule_map,
+ cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
+ order);
+ suballoc->free_space = 1;
+ mutex_unlock(&suballoc->lock);
+ wake_up_all(&suballoc->free_event);
+ kfree(cmdbuf);
+}
+
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
+{
+ return buf->suballoc->iova + buf->suballoc_offset;
+}
+
+dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
+{
+ return buf->suballoc->paddr + buf->suballoc_offset;
+}
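The granule arithmetic in etnaviv_cmdbuf_new() deserves a worked example: requests are padded to 4 KiB granules, then rounded up to a power-of-two region, because bitmap_find_free_region() only hands out order-aligned runs. A 10 KiB request therefore covers three granules and is served as an order-2 (16 KiB) region:

	/* Sketch: the request-to-order mapping used above. */
	static int example_cmdbuf_order(u32 size)
	{
		/* 10 KiB -> ALIGN -> 12 KiB -> 3 granules -> order 2 */
		return order_base_2(ALIGN(size, SUBALLOC_GRANULE) /
				    SUBALLOC_GRANULE);
	}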
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
new file mode 100644
index 000000000000..80d78076c679
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_CMDBUF_H__
+#define __ETNAVIV_CMDBUF_H__
+
+#include <linux/types.h>
+
+struct etnaviv_gpu;
+struct etnaviv_cmdbuf_suballoc;
+
+struct etnaviv_cmdbuf {
+ /* suballocator this cmdbuf is allocated from */
+ struct etnaviv_cmdbuf_suballoc *suballoc;
+ /* user context key, must be unique between all active users */
+ struct etnaviv_file_private *ctx;
+ /* cmdbuf properties */
+ int suballoc_offset;
+ void *vaddr;
+ u32 size;
+ u32 user_size;
+ /* fence after which this buffer is to be disposed */
+ struct dma_fence *fence;
+ /* target exec state */
+ u32 exec_state;
+ /* per GPU in-flight list */
+ struct list_head node;
+ /* BOs attached to this command buffer */
+ unsigned int nr_bos;
+ struct etnaviv_vram_mapping *bo_map[0];
+};
+
+struct etnaviv_cmdbuf_suballoc *
+etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu *gpu);
+void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
+
+struct etnaviv_cmdbuf *
+etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
+ size_t nr_bos);
+void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
+
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
+dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf);
+
+#endif /* __ETNAVIV_CMDBUF_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 00368b14d08d..587e45043542 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -18,11 +18,11 @@
#include <linux/of_platform.h>
#include <drm/drm_of.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_gem.h"
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
@@ -147,21 +147,23 @@ static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
- int ret;
+ struct drm_printer p = drm_seq_file_printer(m);
read_lock(&dev->vma_offset_manager->vm_lock);
- ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+ drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
read_unlock(&dev->vma_offset_manager->vm_lock);
- return ret;
+ return 0;
}
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
+ struct drm_printer p = drm_seq_file_printer(m);
+
seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
mutex_lock(&gpu->mmu->lock);
- drm_mm_dump_table(m, &gpu->mmu->mm);
+ drm_mm_print(&gpu->mmu->mm, &p);
mutex_unlock(&gpu->mmu->lock);
return 0;
@@ -175,7 +177,8 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
u32 i;
seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
- buf->vaddr, (u64)buf->paddr, size - buf->user_size);
+ buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
+ size - buf->user_size);
for (i = 0; i < size / 4; i++) {
if (i && !(i % 4))
@@ -256,12 +259,6 @@ static int etnaviv_debugfs_init(struct drm_minor *minor)
return ret;
}
-
-static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(etnaviv_debugfs_list,
- ARRAY_SIZE(etnaviv_debugfs_list), minor);
-}
#endif
/*
@@ -507,7 +504,6 @@ static struct drm_driver etnaviv_drm_driver = {
.gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
- .debugfs_cleanup = etnaviv_debugfs_cleanup,
#endif
.ioctls = etnaviv_ioctls,
.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
@@ -592,7 +588,7 @@ static void etnaviv_unbind(struct device *dev)
drm->dev_private = NULL;
kfree(priv);
- drm_put_dev(drm);
+ drm_dev_unref(drm);
}
static const struct component_master_ops etnaviv_master_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index af65491a78e2..d019b5e311cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -15,6 +15,7 @@
*/
#include <linux/devcoredump.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
@@ -177,12 +178,11 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
gpu->buffer->size,
- etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer));
+ etnaviv_cmdbuf_get_va(gpu->buffer));
list_for_each_entry(cmd, &gpu->active_cmd_list, node)
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
- cmd->size,
- etnaviv_iommu_get_cmdbuf_va(gpu, cmd));
+ cmd->size, etnaviv_cmdbuf_get_va(cmd));
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index afdd55ddf821..726090d7a6ac 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -15,6 +15,7 @@
*/
#include <linux/reservation.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
@@ -332,8 +333,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
stream = drm_malloc_ab(1, args->stream_size);
- cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
- args->nr_bos);
+ cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
+ ALIGN(args->stream_size, 8) + 8,
+ args->nr_bos);
if (!bos || !relocs || !stream || !cmdbuf) {
ret = -ENOMEM;
goto err_submit_cmds;
@@ -422,7 +424,7 @@ err_submit_objects:
err_submit_cmds:
/* if we still own the cmdbuf */
if (cmdbuf)
- etnaviv_gpu_cmdbuf_free(cmdbuf);
+ etnaviv_cmdbuf_free(cmdbuf);
if (stream)
drm_free_large(stream);
if (bos)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 0a67124bb2a4..130d7d517a19 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,8 @@
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
+
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
@@ -546,6 +548,37 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}
+static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
+{
+ /*
+ * Base value for VIVS_PM_PULSE_EATER register on models where it
+ * cannot be read, extracted from vivante kernel driver.
+ */
+ u32 pulse_eater = 0x01590880;
+
+ if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
+ etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
+ pulse_eater |= BIT(23);
+ }
+
+ if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
+ etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
+ pulse_eater &= ~BIT(16);
+ pulse_eater |= BIT(17);
+ }
+
+ if ((gpu->identity.revision > 0x5420) &&
+     (gpu->identity.features & chipFeatures_PIPE_3D)) {
+ /* Performance fix: disable internal DFS */
+ pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
+ pulse_eater |= BIT(18);
+ }
+
+ gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+}
+
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
u16 prefetch;
@@ -586,6 +619,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
}
+ /* setup the pulse eater */
+ etnaviv_gpu_setup_pulse_eater(gpu);
+
/* setup the MMU */
etnaviv_iommu_restore(gpu);
@@ -593,7 +629,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_init(gpu);
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
- etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
+ etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
prefetch);
}
@@ -658,8 +694,15 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
+ gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
+ if (IS_ERR(gpu->cmdbuf_suballoc)) {
+ dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
+ ret = PTR_ERR(gpu->cmdbuf_suballoc);
+ goto fail;
+ }
+
/* Create buffer: */
- gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
+ gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
if (!gpu->buffer) {
ret = -ENOMEM;
dev_err(gpu->dev, "could not create command buffer\n");
@@ -667,7 +710,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
- gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
+ etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
ret = -EINVAL;
dev_err(gpu->dev,
"command buffer outside valid memory window\n");
@@ -694,7 +737,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
return 0;
free_buffer:
- etnaviv_gpu_cmdbuf_free(gpu->buffer);
+ etnaviv_cmdbuf_free(gpu->buffer);
gpu->buffer = NULL;
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
@@ -1117,41 +1160,6 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
* Cmdstream submission/retirement:
*/
-struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
- size_t nr_bos)
-{
- struct etnaviv_cmdbuf *cmdbuf;
- size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
- sizeof(*cmdbuf));
-
- cmdbuf = kzalloc(sz, GFP_KERNEL);
- if (!cmdbuf)
- return NULL;
-
- if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
- size = ALIGN(size, SZ_4K);
-
- cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
- GFP_KERNEL);
- if (!cmdbuf->vaddr) {
- kfree(cmdbuf);
- return NULL;
- }
-
- cmdbuf->gpu = gpu;
- cmdbuf->size = size;
-
- return cmdbuf;
-}
-
-void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
-{
- etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
- dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
- cmdbuf->paddr);
- kfree(cmdbuf);
-}
-
static void retire_worker(struct work_struct *work)
{
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
@@ -1177,7 +1185,7 @@ static void retire_worker(struct work_struct *work)
etnaviv_gem_mapping_unreference(mapping);
}
- etnaviv_gpu_cmdbuf_free(cmdbuf);
+ etnaviv_cmdbuf_free(cmdbuf);
/*
* We need to balance the runtime PM count caused by
* each submission. Upon submission, we increment
@@ -1593,10 +1601,15 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
#endif
if (gpu->buffer) {
- etnaviv_gpu_cmdbuf_free(gpu->buffer);
+ etnaviv_cmdbuf_free(gpu->buffer);
gpu->buffer = NULL;
}
+ if (gpu->cmdbuf_suballoc) {
+ etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
+ gpu->cmdbuf_suballoc = NULL;
+ }
+
if (gpu->mmu) {
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8c6b824e9d0a..1c0606ea7d5e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -92,6 +92,7 @@ struct etnaviv_event {
struct dma_fence *fence;
};
+struct etnaviv_cmdbuf_suballoc;
struct etnaviv_cmdbuf;
struct etnaviv_gpu {
@@ -135,6 +136,7 @@ struct etnaviv_gpu {
int irq;
struct etnaviv_iommu *mmu;
+ struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
/* Power Control: */
struct clk *clk_bus;
@@ -150,29 +152,6 @@ struct etnaviv_gpu {
struct work_struct recover_work;
};
-struct etnaviv_cmdbuf {
- /* device this cmdbuf is allocated for */
- struct etnaviv_gpu *gpu;
- /* user context key, must be unique between all active users */
- struct etnaviv_file_private *ctx;
- /* cmdbuf properties */
- void *vaddr;
- dma_addr_t paddr;
- u32 size;
- u32 user_size;
- /* vram node used if the cmdbuf is mapped through the MMUv2 */
- struct drm_mm_node vram_node;
- /* fence after which this buffer is to be disposed */
- struct dma_fence *fence;
- /* target exec state */
- u32 exec_state;
- /* per GPU in-flight list */
- struct list_head node;
- /* BOs attached to this command buffer */
- unsigned int nr_bos;
- struct etnaviv_vram_mapping *bo_map[0];
-};
-
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
etnaviv_writel(data, gpu->mmio + reg);
@@ -211,9 +190,6 @@ int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
-struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
- u32 size, size_t nr_bos);
-void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 81f1583a7946..7a7c97f599d7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -184,7 +184,7 @@ static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
}
-static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
.ops = {
.domain_free = etnaviv_domain_free,
.map = etnaviv_iommuv1_map,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 7e9c4d210a84..cbe447ac5974 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
@@ -229,7 +230,7 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
-static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
.ops = {
.domain_free = etnaviv_iommuv2_domain_free,
.map = etnaviv_iommuv2_map,
@@ -254,7 +255,8 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
(u32)etnaviv_domain->bad_page_dma);
- etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
+ etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
+ prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index fe0e85b41310..f103e787de94 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -15,6 +15,7 @@
*/
#include "common.xml.h"
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
@@ -107,24 +108,21 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct drm_mm_node *node, size_t size)
{
struct etnaviv_vram_mapping *free = NULL;
+ enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
int ret;
lockdep_assert_held(&mmu->lock);
while (1) {
struct etnaviv_vram_mapping *m, *n;
+ struct drm_mm_scan scan;
struct list_head list;
bool found;
- /*
- * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
- * drm_mm into giving out a low IOVA after address space
- * rollover. This needs a proper fix.
- */
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
- size, 0, mmu->last_iova, ~0UL,
- mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
-
+ size, 0, 0,
+ mmu->last_iova, U64_MAX,
+ mode);
if (ret != -ENOSPC)
break;
@@ -139,7 +137,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
}
/* Try to retire some entries */
- drm_mm_init_scan(&mmu->mm, size, 0, 0);
+ drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
found = 0;
INIT_LIST_HEAD(&list);
@@ -156,7 +154,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
continue;
list_add(&free->scan_node, &list);
- if (drm_mm_scan_add_block(&free->vram_node)) {
+ if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
found = true;
break;
}
@@ -165,7 +163,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
if (!found) {
/* Nothing found, clean up and fail */
list_for_each_entry_safe(m, n, &list, scan_node)
- BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
+ BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
break;
}
@@ -176,7 +174,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
* can leave the block pinned.
*/
list_for_each_entry_safe(m, n, &list, scan_node)
- if (!drm_mm_scan_remove_block(&m->vram_node))
+ if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
list_del_init(&m->scan_node);
/*
@@ -191,13 +189,12 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
list_del_init(&m->scan_node);
}
+ mode = DRM_MM_INSERT_EVICT;
+
/*
* We removed enough mappings so that the new allocation will
- * succeed. Ensure that the MMU will be flushed before the
- * associated commit requesting this mapping, and retry the
- * allocation one more time.
+ * succeed, retry the allocation one more time.
*/
- mmu->need_flush = true;
}
return ret;
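The conversion above follows the new stack-allocated drm_mm_scan protocol: every node fed to drm_mm_scan_add_block() must be handed back through drm_mm_scan_remove_block() before anything is evicted, and the in-tree users return them in reverse order of adding. A hedged sketch with a hypothetical buffer type:

	#include <drm/drm_mm.h>

	struct example_buf {		/* hypothetical driver object */
		struct drm_mm_node node;
		bool evict;
	};

	/* Sketch: scan an LRU-ordered array for an evictable hole. */
	static bool example_scan_for_hole(struct drm_mm *mm,
					  struct example_buf **bufs, int count,
					  u64 size)
	{
		struct drm_mm_scan scan;
		bool found = false;
		int i, added = 0;

		drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_LOW);
		while (added < count)
			if (drm_mm_scan_add_block(&scan, &bufs[added++]->node)) {
				found = true;	/* a big enough hole exists */
				break;
			}

		/* Hand every scanned node back, newest first; a true return
		 * marks a node that must be evicted to open the hole. */
		for (i = added - 1; i >= 0; i--)
			if (drm_mm_scan_remove_block(&scan, &bufs[i]->node))
				bufs[i]->evict = true;

		return found;
	}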
@@ -249,6 +246,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
}
list_add_tail(&mapping->mmu_node, &mmu->mappings);
+ mmu->need_flush = true;
mutex_unlock(&mmu->lock);
return ret;
@@ -266,6 +264,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
etnaviv_iommu_remove_mapping(mmu, mapping);
list_del(&mapping->mmu_node);
+ mmu->need_flush = true;
mutex_unlock(&mmu->lock);
}
@@ -321,55 +320,50 @@ void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
etnaviv_iommuv2_restore(gpu);
}
-u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
- struct etnaviv_cmdbuf *buf)
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
+ struct drm_mm_node *vram_node, size_t size,
+ u32 *iova)
{
struct etnaviv_iommu *mmu = gpu->mmu;
if (mmu->version == ETNAVIV_IOMMU_V1) {
- return buf->paddr - gpu->memory_base;
+ *iova = paddr - gpu->memory_base;
+ return 0;
} else {
int ret;
- if (buf->vram_node.allocated)
- return (u32)buf->vram_node.start;
-
mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
- buf->size + SZ_64K);
+ ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
if (ret < 0) {
mutex_unlock(&mmu->lock);
- return 0;
+ return ret;
}
- ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
- buf->size, IOMMU_READ);
+ ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
+ IOMMU_READ);
if (ret < 0) {
- drm_mm_remove_node(&buf->vram_node);
+ drm_mm_remove_node(vram_node);
mutex_unlock(&mmu->lock);
- return 0;
+ return ret;
}
- /*
- * At least on GC3000 the FE MMU doesn't properly flush old TLB
- * entries. Make sure to space the command buffers out in a way
- * that the FE MMU prefetch won't load invalid entries.
- */
- mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
+ mmu->last_iova = vram_node->start + size;
gpu->mmu->need_flush = true;
mutex_unlock(&mmu->lock);
- return (u32)buf->vram_node.start;
+ *iova = (u32)vram_node->start;
+ return 0;
}
}
-void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
- struct etnaviv_cmdbuf *buf)
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
+ struct drm_mm_node *vram_node, size_t size,
+ u32 iova)
{
struct etnaviv_iommu *mmu = gpu->mmu;
- if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
+ if (mmu->version == ETNAVIV_IOMMU_V2) {
mutex_lock(&mmu->lock);
- iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
- drm_mm_remove_node(&buf->vram_node);
+ iommu_unmap(mmu->domain, iova, size);
+ drm_mm_remove_node(vram_node);
mutex_unlock(&mmu->lock);
}
}
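A sketch of pairing the two suballoc helpers over a mapping's lifetime, per the reworked signatures above (the caller-provided node and the usage frame are hypothetical):

	/* Sketch: map a suballocated region, use it, then release the VA. */
	static int example_with_suballoc_va(struct etnaviv_gpu *gpu,
					    dma_addr_t paddr,
					    struct drm_mm_node *node,
					    size_t size)
	{
		u32 iova;
		int ret;

		ret = etnaviv_iommu_get_suballoc_va(gpu, paddr, node, size,
						    &iova);
		if (ret)
			return ret;

		/* ... point the GPU at iova ... */

		etnaviv_iommu_put_suballoc_va(gpu, node, size, iova);
		return 0;
	}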
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index e787e49c9693..54be289e5981 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -62,10 +62,12 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
-u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
- struct etnaviv_cmdbuf *buf);
-void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
- struct etnaviv_cmdbuf *buf);
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
+ struct drm_mm_node *vram_node, size_t size,
+ u32 *iova);
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
+ struct drm_mm_node *vram_node, size_t size,
+ u32 iova);
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index d706ca4e2f02..1d185347c64c 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -19,7 +19,6 @@ comment "CRTCs"
config DRM_EXYNOS_FIMD
bool "FIMD"
depends on !FB_S3C
- select FB_MODE_HELPERS
select MFD_SYSCON
help
Choose this option if you want to use Exynos FIMD for DRM.
@@ -32,7 +31,6 @@ config DRM_EXYNOS5433_DECON
config DRM_EXYNOS7_DECON
bool "DECON on Exynos7"
depends on !FB_S3C
- select FB_MODE_HELPERS
help
Choose this option if you want to use Exynos DECON for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 75eeb831ed6a..0fd6f7a18364 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,9 +13,11 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <video/exynos5433_decon.h>
@@ -25,6 +27,9 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
+#define DSD_CFG_MUX 0x1004
+#define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13)
+
#define WINDOWS_NR 3
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
@@ -57,6 +62,7 @@ struct decon_context {
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
void __iomem *addr;
+ struct regmap *sysreg;
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
int pipe;
unsigned long flags;
@@ -118,18 +124,29 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
static void decon_setup_trigger(struct decon_context *ctx)
{
- u32 val = !(ctx->out_type & I80_HW_TRG)
- ? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
- TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
- : TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
- TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN;
- writel(val, ctx->addr + DECON_TRIGCON);
+ if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
+ return;
+
+ if (!(ctx->out_type & I80_HW_TRG)) {
+ writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F
+ | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
+ ctx->addr + DECON_TRIGCON);
+ return;
+ }
+
+ writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | TRIGCON_HWTRIGMASK
+ | TRIGCON_HWTRIGEN, ctx->addr + DECON_TRIGCON);
+
+ if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX,
+ DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0))
+ DRM_ERROR("Cannot update sysreg.\n");
}
static void decon_commit(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *m = &crtc->base.mode;
+ bool interlaced = false;
u32 val;
if (test_bit(BIT_SUSPENDED, &ctx->flags))
@@ -140,13 +157,16 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
m->crtc_hsync_end = m->crtc_htotal - 92;
m->crtc_vsync_start = m->crtc_vdisplay + 1;
m->crtc_vsync_end = m->crtc_vsync_start + 1;
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
+ interlaced = true;
}
- if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
- decon_setup_trigger(ctx);
+ decon_setup_trigger(ctx);
/* lcd on and use command if */
val = VIDOUT_LCD_ON;
+ if (interlaced)
+ val |= VIDOUT_INTERLACE_EN_F;
if (ctx->out_type & IFTYPE_I80) {
val |= VIDOUT_COMMAND_IF;
} else {
@@ -155,15 +175,21 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
writel(val, ctx->addr + DECON_VIDOUTCON0);
- val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
- VIDTCON2_HOZVAL(m->hdisplay - 1);
+ if (interlaced)
+ val = VIDTCON2_LINEVAL(m->vdisplay / 2 - 1) |
+ VIDTCON2_HOZVAL(m->hdisplay - 1);
+ else
+ val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
+ VIDTCON2_HOZVAL(m->hdisplay - 1);
writel(val, ctx->addr + DECON_VIDTCON2);
if (!(ctx->out_type & IFTYPE_I80)) {
- val = VIDTCON00_VBPD_F(
- m->crtc_vtotal - m->crtc_vsync_end - 1) |
- VIDTCON00_VFPD_F(
- m->crtc_vsync_start - m->crtc_vdisplay - 1);
+ int vbp = m->crtc_vtotal - m->crtc_vsync_end;
+ int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
+
+ if (interlaced)
+ vbp = vbp / 2 - 1;
+ val = VIDTCON00_VBPD_F(vbp - 1) | VIDTCON00_VFPD_F(vfp - 1);
writel(val, ctx->addr + DECON_VIDTCON00);
val = VIDTCON01_VSPW_F(
@@ -195,7 +221,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val = readl(ctx->addr + DECON_WINCONx(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_XRGB1555:
val |= WINCONx_BPPMODE_16BPP_I1555;
val |= WINCONx_HAWSWP_F;
@@ -221,7 +247,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
return;
}
- DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel);
+ DRM_DEBUG_KMS("bpp = %u\n", fb->format->cpp[0] * 8);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -270,7 +296,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct decon_context *ctx = crtc->ctx;
struct drm_framebuffer *fb = state->base.fb;
unsigned int win = plane->index;
- unsigned int bpp = fb->bits_per_pixel >> 3;
+ unsigned int bpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
u32 val;
@@ -278,12 +304,22 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
if (test_bit(BIT_SUSPENDED, &ctx->flags))
return;
- val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
- writel(val, ctx->addr + DECON_VIDOSDxA(win));
+ if (crtc->base.mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ val = COORDINATE_X(state->crtc.x) |
+ COORDINATE_Y(state->crtc.y / 2);
+ writel(val, ctx->addr + DECON_VIDOSDxA(win));
+
+ val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
+ COORDINATE_Y((state->crtc.y + state->crtc.h) / 2 - 1);
+ writel(val, ctx->addr + DECON_VIDOSDxB(win));
+ } else {
+ val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
+ writel(val, ctx->addr + DECON_VIDOSDxA(win));
- val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
- COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
- writel(val, ctx->addr + DECON_VIDOSDxB(win));
+ val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
+ COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
+ writel(val, ctx->addr + DECON_VIDOSDxB(win));
+ }
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
VIDOSD_Wx_ALPHA_B_F(0x0);
@@ -355,8 +391,6 @@ static void decon_swreset(struct decon_context *ctx)
udelay(10);
}
- WARN(tries == 0, "failed to disable DECON\n");
-
writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
for (tries = 2000; tries; --tries) {
if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
@@ -467,7 +501,7 @@ err:
clk_disable_unprepare(ctx->clks[i]);
}
-static struct exynos_drm_crtc_ops decon_crtc_ops = {
+static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
.enable_vblank = decon_enable_vblank,
@@ -557,6 +591,13 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
if (val) {
writel(val, ctx->addr + DECON_VIDINTCON1);
+ if (ctx->out_type & IFTYPE_HDMI) {
+ val = readl(ctx->addr + DECON_VIDOUTCON0);
+ val &= VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F;
+ if (val ==
+ (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
+ return IRQ_HANDLED;
+ }
drm_crtc_handle_vblank(&ctx->crtc->base);
}
@@ -637,6 +678,15 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->out_type |= IFTYPE_I80;
}
+ if (ctx->out_type & I80_HW_TRG) {
+ ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,disp-sysreg");
+ if (IS_ERR(ctx->sysreg)) {
+ dev_err(dev, "failed to get system register\n");
+ return PTR_ERR(ctx->sysreg);
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
struct clk *clk;
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f4d5a2133777..f9ab19e205e2 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -281,7 +281,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_BURSTLEN_16WORD;
@@ -330,7 +330,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
+ DRM_DEBUG_KMS("bpp = %d\n", fb->format->cpp[0] * 8);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -340,7 +340,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
* movement causes unstable DMA which results into iommu crash/tear.
*/
- padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width;
+ padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width;
if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
@@ -407,7 +407,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int last_x;
unsigned int last_y;
unsigned int win = plane->index;
- unsigned int bpp = fb->bits_per_pixel >> 3;
+ unsigned int bpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 528229faffe4..1ef0be338b85 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -99,7 +99,6 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
struct drm_connector *connector)
{
struct exynos_dp_device *dp = to_dp(plat_data);
- struct drm_encoder *encoder = &dp->encoder;
int ret;
drm_connector_register(connector);
@@ -107,9 +106,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
/* Pre-empt DP connector creation if there's a bridge */
if (dp->ptn_bridge) {
- bridge->next = dp->ptn_bridge;
- dp->ptn_bridge->encoder = encoder;
- ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge);
+ ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
bridge->next = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 2530bf57716a..5367b6664fe3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -39,6 +39,14 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
if (exynos_crtc->ops->disable)
exynos_crtc->ops->disable(exynos_crtc);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
}
static void
@@ -109,9 +117,6 @@ static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct exynos_drm_private *private = crtc->dev->dev_private;
-
- private->crtc[exynos_crtc->pipe] = NULL;
drm_crtc_cleanup(crtc);
kfree(exynos_crtc);
@@ -134,7 +139,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
void *ctx)
{
struct exynos_drm_crtc *exynos_crtc;
- struct exynos_drm_private *private = drm_dev->dev_private;
struct drm_crtc *crtc;
int ret;
@@ -149,8 +153,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
crtc = &exynos_crtc->base;
- private->crtc[pipe] = crtc;
-
ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL,
&exynos_crtc_funcs, NULL);
if (ret < 0)
@@ -209,23 +211,3 @@ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
if (exynos_crtc->ops->te_handler)
exynos_crtc->ops->te_handler(exynos_crtc);
}
-
-void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc,
- struct drm_file *file)
-{
- struct drm_pending_vblank_event *e;
- unsigned long flags;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- e = crtc->state->event;
- if (e && e->base.file_priv == file)
- crtc->state->event = NULL;
- else
- e = NULL;
-
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
- if (e)
- drm_event_cancel_free(crtc->dev, &e->base);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index cfdcf3e4eb1b..6a581a8af465 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -40,8 +40,4 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
*/
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
-/* This function cancels a page flip request. */
-void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc,
- struct drm_file *file);
-
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 739180ac3da5..035d02ecffcd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -38,56 +38,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-struct exynos_atomic_commit {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_atomic_state *state;
- u32 crtcs;
-};
-
-static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
-{
- struct drm_device *dev = commit->dev;
- struct exynos_drm_private *priv = dev->dev_private;
- struct drm_atomic_state *state = commit->state;
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- /*
- * Exynos can't update planes with CRTCs and encoders disabled,
- * its updates routines, specially for FIMD, requires the clocks
- * to be enabled. So it is necessary to handle the modeset operations
- * *before* the commit_planes() step, this way it will always
- * have the relevant clocks enabled to perform the update.
- */
-
- drm_atomic_helper_commit_planes(dev, state, 0);
-
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_put(state);
-
- spin_lock(&priv->lock);
- priv->pending &= ~commit->crtcs;
- spin_unlock(&priv->lock);
-
- wake_up_all(&priv->wait);
-
- kfree(commit);
-}
-
-static void exynos_drm_atomic_work(struct work_struct *work)
-{
- struct exynos_atomic_commit *commit = container_of(work,
- struct exynos_atomic_commit, work);
-
- exynos_atomic_commit_complete(commit);
-}
-
static struct device *exynos_drm_get_dma_device(void);
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
@@ -186,7 +136,7 @@ err_free_private:
return ret;
}
-static int exynos_drm_unload(struct drm_device *dev)
+static void exynos_drm_unload(struct drm_device *dev)
{
exynos_drm_device_subdrv_remove(dev);
@@ -200,67 +150,6 @@ static int exynos_drm_unload(struct drm_device *dev)
kfree(dev->dev_private);
dev->dev_private = NULL;
-
- return 0;
-}
-
-static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
-{
- bool pending;
-
- spin_lock(&priv->lock);
- pending = priv->pending & crtcs;
- spin_unlock(&priv->lock);
-
- return pending;
-}
-
-int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
- bool nonblock)
-{
- struct exynos_drm_private *priv = dev->dev_private;
- struct exynos_atomic_commit *commit;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int i, ret;
-
- commit = kzalloc(sizeof(*commit), GFP_KERNEL);
- if (!commit)
- return -ENOMEM;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- kfree(commit);
- return ret;
- }
-
- /* This is the point of no return */
-
- INIT_WORK(&commit->work, exynos_drm_atomic_work);
- commit->dev = dev;
- commit->state = state;
-
- /* Wait until all affected CRTCs have completed previous commits and
- * mark them as pending.
- */
- for_each_crtc_in_state(state, crtc, crtc_state, i)
- commit->crtcs |= drm_crtc_mask(crtc);
-
- wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
-
- spin_lock(&priv->lock);
- priv->pending |= commit->crtcs;
- spin_unlock(&priv->lock);
-
- drm_atomic_helper_swap_state(state, true);
-
- drm_atomic_state_get(state);
- if (nonblock)
- schedule_work(&commit->work);
- else
- exynos_atomic_commit_complete(commit);
-
- return 0;
}
int exynos_atomic_check(struct drm_device *dev,
@@ -309,12 +198,7 @@ err_file_priv_free:
static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file)
{
- struct drm_crtc *crtc;
-
exynos_drm_subdrv_close(dev, file);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- exynos_drm_crtc_cancel_page_flip(crtc, file);
}
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 80c4d5b81689..cf6e08cb35a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -211,12 +211,6 @@ struct drm_exynos_file_private {
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
- /*
- * created crtc object would be contained at this array and
- * this array is used to be aware of which crtc did it request vblank.
- */
- struct drm_crtc *crtc[MAX_CRTC];
-
struct device *dma_dev;
void *mapping;
@@ -231,9 +225,9 @@ struct exynos_drm_private {
static inline struct exynos_drm_crtc *
exynos_drm_crtc_from_pipe(struct drm_device *dev, int pipe)
{
- struct exynos_drm_private *private = dev->dev_private;
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
- return to_exynos_crtc(private->crtc[pipe]);
+ return to_exynos_crtc(crtc);
}
static inline struct device *to_dma_dev(struct drm_device *dev)
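
A note on the lookup rework above: drm_crtc_from_index() is the new core helper that replaces driver-private crtc[] arrays keyed by pipe. A minimal sketch of the idiom (the NULL check is an addition for robustness; to_exynos_crtc() is the driver's existing cast helper):

#include <drm/drm_crtc.h>

/* Resolve a pipe index to the driver CRTC via the core helper instead
 * of a private crtc[] cache; returns NULL for an unknown index. */
static struct exynos_drm_crtc *crtc_for_pipe(struct drm_device *dev, int pipe)
{
        struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);

        return crtc ? to_exynos_crtc(crtc) : NULL;
}
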
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e07cb1fe4860..812e2ec0761d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1718,10 +1718,8 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
}
bridge = of_drm_find_bridge(dsi->bridge_node);
- if (bridge) {
- encoder->bridge = bridge;
- drm_bridge_attach(drm_dev, bridge);
- }
+ if (bridge)
+ drm_bridge_attach(encoder, bridge, NULL);
return mipi_dsi_host_register(&dsi->dsi_host);
}
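
The call above reflects the reworked bridge API: drm_bridge_attach() now takes the encoder plus an optional previous bridge for chaining, and records the bridge on the encoder itself. A hedged sketch of attaching a bridge found via device tree, assuming the usual lookup path:

#include <linux/errno.h>
#include <drm/drm_bridge.h>

static int attach_dt_bridge(struct drm_encoder *encoder,
                            struct device_node *bridge_node)
{
        struct drm_bridge *bridge = of_drm_find_bridge(bridge_node);

        if (!bridge)
                return -EPROBE_DEFER;   /* bridge driver not bound yet */

        /* The third argument is the previous bridge in a chain; NULL
         * attaches directly to the encoder and sets encoder->bridge. */
        return drm_bridge_attach(encoder, bridge, NULL);
}
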
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 23cce0a3f5fc..c77a5aced81a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -126,7 +126,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
+ mode_cmd->offsets[i];
}
- drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd);
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret < 0) {
@@ -187,11 +187,40 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
return exynos_fb->dma_addr[index];
}
+static void exynos_drm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ /*
+ * Exynos can't update planes while CRTCs and encoders are disabled:
+ * its update routines, especially for FIMD, require the clocks
+ * to be enabled. So it is necessary to handle the modeset operations
+ * *before* the commit_planes() step; this way they always have the
+ * relevant clocks enabled to perform the update.
+ */
+ drm_atomic_helper_commit_planes(dev, state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
+ .atomic_commit_tail = exynos_drm_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
.atomic_check = exynos_atomic_check,
- .atomic_commit = exynos_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
void exynos_drm_mode_config_init(struct drm_device *dev)
@@ -208,4 +237,5 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.max_height = 4096;
dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
+ dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
}
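
Switching .atomic_commit to drm_atomic_helper_commit hands commit tracking, the nonblocking worker and completion signalling to the core; the driver only overrides the tail. For contrast with the exynos tail above, this is roughly the stock ordering the helper uses when no override is installed (a sketch of the helper library's default; exynos only moves the plane commit after the enables so FIMD's clocks are running):

#include <drm/drm_atomic_helper.h>

/* Default tail ordering: planes are committed between the modeset
 * disable and enable steps. */
static void default_commit_tail(struct drm_atomic_state *old_state)
{
        struct drm_device *dev = old_state->dev;

        drm_atomic_helper_commit_modeset_disables(dev, old_state);
        drm_atomic_helper_commit_planes(dev, old_state, 0);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
        drm_atomic_helper_commit_hw_done(old_state);
        drm_atomic_helper_wait_for_vblanks(dev, old_state);
        drm_atomic_helper_cleanup_planes(dev, old_state);
}
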
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 9f35deb56170..bcdb2720b68e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -76,7 +76,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
{
struct fb_info *fbi;
struct drm_framebuffer *fb = helper->fb;
- unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
+ unsigned int size = fb->width * fb->height * fb->format->cpp[0];
unsigned int nr_pages;
unsigned long offset;
@@ -90,7 +90,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &exynos_drm_fb_ops;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
nr_pages = exynos_gem->size >> PAGE_SHIFT;
@@ -103,7 +103,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EIO;
}
- offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
+ offset = fbi->var.xoffset * fb->format->cpp[0];
offset += fbi->var.yoffset * fb->pitches[0];
fbi->screen_base = exynos_gem->kvaddr + offset;
@@ -208,7 +208,6 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
struct exynos_drm_fbdev *fbdev;
struct exynos_drm_private *private = dev->dev_private;
struct drm_fb_helper *helper;
- unsigned int num_crtc;
int ret;
if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
@@ -225,9 +224,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
- num_crtc = dev->mode_config.num_crtc;
-
- ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
if (ret < 0) {
DRM_ERROR("failed to initialize drm fb helper.\n");
goto err_init;
@@ -270,10 +267,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
- if (fb) {
- drm_framebuffer_unregister_private(fb);
+ if (fb)
drm_framebuffer_remove(fb);
- }
}
drm_fb_helper_unregister_fbi(fb_helper);
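
The fbdev hunks are part of the tree-wide move from drm_framebuffer's bits_per_pixel/depth/pixel_format fields to the shared format descriptor at fb->format. A small sketch of the replacement arithmetic for single-plane formats (multi-planar YUV would index cpp[] per plane):

#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>

/* Byte offset of pixel (x, y) in plane 0; fb->format->cpp[0] is the
 * bytes-per-pixel value that used to be bits_per_pixel >> 3. */
static unsigned long fb_pixel_offset(const struct drm_framebuffer *fb,
                                     unsigned int x, unsigned int y)
{
        return (unsigned long)y * fb->pitches[0] + x * fb->format->cpp[0];
}
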
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e2e405170d35..a9fa444c6053 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -125,10 +125,8 @@ static struct fimd_driver_data exynos3_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x210,
.lcdblk_bypass_shift = 1,
- .trg_type = I80_HW_TRG,
.has_shadowcon = 1,
.has_vidoutcon = 1,
- .has_trigger_per_te = 1,
};
static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -738,7 +736,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
unsigned long val, size, offset;
unsigned int last_x, last_y, buf_offsize, line_size;
unsigned int win = plane->index;
- unsigned int bpp = fb->bits_per_pixel >> 3;
+ unsigned int bpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
@@ -804,7 +802,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
}
- fimd_win_set_pixfmt(ctx, win, fb->pixel_format, state->src.w);
+ fimd_win_set_pixfmt(ctx, win, fb->format->format, state->src.w);
/* hardware window 0 doesn't support color key. */
if (win != 0)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fbd13fabdf2d..2b8bf2dd6387 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1193,6 +1193,17 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
if (!node)
return -ENOMEM;
+ /*
+ * To avoid an integer overflow for the later size computations, we
+ * enforce a maximum number of submitted commands here. This limit is
+ * sufficient for all conceivable use cases of the G2D.
+ */
+ if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
+ req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
+ dev_err(dev, "number of submitted G2D commands exceeds limit\n");
+ return -EINVAL;
+ }
+
node->event = NULL;
if (req->event_type != G2D_EVENT_NOT) {
@@ -1250,7 +1261,11 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
}
- /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
+ /*
+ * Check the size of the cmdlist. The trailing 2 accounts for the
+ * implicit G2D_BITBLT_START that is appended once all submitted
+ * commands have been checked.
+ */
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
@@ -1668,7 +1683,7 @@ struct platform_driver g2d_driver = {
.probe = g2d_probe,
.remove = g2d_remove,
.driver = {
- .name = "s5p-g2d",
+ .name = "exynos-drm-g2d",
.owner = THIS_MODULE,
.pm = &g2d_pm_ops,
.of_match_table = exynos_g2d_match,
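
The new G2D bound matters because the size computation multiplies user-supplied counts: capping cmd_nr and cmd_buf_nr before the cmd_nr * 2 + cmd_buf_nr * 2 + 2 arithmetic keeps it far from 32-bit wrap-around. A worked sketch of the two-stage check, with a hypothetical stand-in for the driver's limit constant:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical stand-in; the real G2D_CMDLIST_DATA_NUM lives in
 * exynos_drm_g2d.c. */
#define CMDLIST_DATA_NUM        4096u

static int check_cmdlist_size(u32 last, u32 cmd_nr, u32 cmd_buf_nr)
{
        u32 size;

        /* Reject oversized counts first so the multiplications below
         * cannot wrap and sneak past the size check. */
        if (cmd_nr > CMDLIST_DATA_NUM || cmd_buf_nr > CMDLIST_DATA_NUM)
                return -EINVAL;

        /* Each command occupies two slots; the trailing +2 is the
         * implicit G2D_BITBLT_START appended after validation. */
        size = last + cmd_nr * 2 + cmd_buf_nr * 2 + 2;
        if (size > CMDLIST_DATA_NUM)
                return -EINVAL;

        return 0;
}
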
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index a0def0be6d65..2ef43d403eaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -19,6 +19,7 @@
#include <linux/of_graph.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -269,35 +270,9 @@ static int parse_dt(struct exynos_mic *mic)
}
nodes[j++] = remote_node;
- switch (i) {
- case ENDPOINT_DECON_NODE:
- /* decon node */
- if (of_get_child_by_name(remote_node,
- "i80-if-timings"))
- mic->i80_mode = 1;
-
- break;
- case ENDPOINT_DSI_NODE:
- /* panel node */
- remote_node = get_remote_node(remote_node, 1);
- if (!remote_node) {
- ret = -EPIPE;
- goto exit;
- }
- nodes[j++] = remote_node;
-
- ret = of_get_videomode(remote_node,
- &mic->vm, 0);
- if (ret) {
- DRM_ERROR("mic: failed to get videomode");
- goto exit;
- }
-
- break;
- default:
- DRM_ERROR("mic: Unknown endpoint from MIC");
- break;
- }
+ if (i == ENDPOINT_DECON_NODE &&
+ of_get_child_by_name(remote_node, "i80-if-timings"))
+ mic->i80_mode = 1;
}
exit:
@@ -312,7 +287,6 @@ static void mic_disable(struct drm_bridge *bridge) { }
static void mic_post_disable(struct drm_bridge *bridge)
{
struct exynos_mic *mic = bridge->driver_private;
- int i;
mutex_lock(&mic_mutex);
if (!mic->enabled)
@@ -320,39 +294,43 @@ static void mic_post_disable(struct drm_bridge *bridge)
mic_set_path(mic, 0);
- for (i = NUM_CLKS - 1; i > -1; i--)
- clk_disable_unprepare(mic->clks[i]);
-
+ pm_runtime_put(mic->dev);
mic->enabled = 0;
already_disabled:
mutex_unlock(&mic_mutex);
}
+static void mic_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_mic *mic = bridge->driver_private;
+
+ mutex_lock(&mic_mutex);
+ drm_display_mode_to_videomode(mode, &mic->vm);
+ mutex_unlock(&mic_mutex);
+}
+
static void mic_pre_enable(struct drm_bridge *bridge)
{
struct exynos_mic *mic = bridge->driver_private;
- int ret, i;
+ int ret;
mutex_lock(&mic_mutex);
if (mic->enabled)
- goto already_enabled;
+ goto unlock;
- for (i = 0; i < NUM_CLKS; i++) {
- ret = clk_prepare_enable(mic->clks[i]);
- if (ret < 0) {
- DRM_ERROR("Failed to enable clock (%s)\n",
- clk_names[i]);
- goto turn_off_clks;
- }
- }
+ ret = pm_runtime_get_sync(mic->dev);
+ if (ret < 0)
+ goto unlock;
mic_set_path(mic, 1);
ret = mic_sw_reset(mic);
if (ret) {
DRM_ERROR("Failed to reset\n");
- goto turn_off_clks;
+ goto turn_off;
}
if (!mic->i80_mode)
@@ -365,10 +343,9 @@ static void mic_pre_enable(struct drm_bridge *bridge)
return;
-turn_off_clks:
- while (--i > -1)
- clk_disable_unprepare(mic->clks[i]);
-already_enabled:
+turn_off:
+ pm_runtime_put(mic->dev);
+unlock:
mutex_unlock(&mic_mutex);
}
@@ -377,6 +354,7 @@ static void mic_enable(struct drm_bridge *bridge) { }
static const struct drm_bridge_funcs mic_bridge_funcs = {
.disable = mic_disable,
.post_disable = mic_post_disable,
+ .mode_set = mic_mode_set,
.pre_enable = mic_pre_enable,
.enable = mic_enable,
};
@@ -401,14 +379,12 @@ static void exynos_mic_unbind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
- int i;
mutex_lock(&mic_mutex);
if (!mic->enabled)
goto already_disabled;
- for (i = NUM_CLKS - 1; i > -1; i--)
- clk_disable_unprepare(mic->clks[i]);
+ pm_runtime_put(mic->dev);
already_disabled:
mutex_unlock(&mic_mutex);
@@ -421,6 +397,41 @@ static const struct component_ops exynos_mic_component_ops = {
.unbind = exynos_mic_unbind,
};
+#ifdef CONFIG_PM
+static int exynos_mic_suspend(struct device *dev)
+{
+ struct exynos_mic *mic = dev_get_drvdata(dev);
+ int i;
+
+ for (i = NUM_CLKS - 1; i > -1; i--)
+ clk_disable_unprepare(mic->clks[i]);
+
+ return 0;
+}
+
+static int exynos_mic_resume(struct device *dev)
+{
+ struct exynos_mic *mic = dev_get_drvdata(dev);
+ int ret, i;
+
+ for (i = 0; i < NUM_CLKS; i++) {
+ ret = clk_prepare_enable(mic->clks[i]);
+ if (ret < 0) {
+ DRM_ERROR("Failed to enable clock (%s)\n",
+ clk_names[i]);
+ while (--i > -1)
+ clk_disable_unprepare(mic->clks[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_mic_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
+};
+
static int exynos_mic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -473,9 +484,18 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
+ pm_runtime_enable(dev);
+
+ ret = component_add(dev, &exynos_mic_component_ops);
+ if (ret)
+ goto err_pm;
+
DRM_DEBUG_KMS("MIC has been probed\n");
- return component_add(dev, &exynos_mic_component_ops);
+ return 0;
+
+err_pm:
+ pm_runtime_disable(dev);
err:
return ret;
}
@@ -483,6 +503,7 @@ err:
static int exynos_mic_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &exynos_mic_component_ops);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -497,6 +518,7 @@ struct platform_driver mic_driver = {
.remove = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
+ .pm = &exynos_mic_pm_ops,
.owner = THIS_MODULE,
.of_match_table = exynos_mic_of_match,
},
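
The MIC conversion moves clock handling out of the bridge enable/disable paths and into runtime-PM callbacks, so the bridge code only takes and drops a PM reference. A minimal sketch of the pattern with hypothetical names (the error path drops the usage count that a failed pm_runtime_get_sync() still takes):

#include <linux/clk.h>
#include <linux/pm_runtime.h>

struct mydev {
        struct device *dev;
        struct clk *clk;
};

/* The runtime-PM callbacks own the clock... */
static int mydev_runtime_suspend(struct device *dev)
{
        struct mydev *md = dev_get_drvdata(dev);

        clk_disable_unprepare(md->clk);
        return 0;
}

static int mydev_runtime_resume(struct device *dev)
{
        struct mydev *md = dev_get_drvdata(dev);

        return clk_prepare_enable(md->clk);
}

/* ...and the enable/disable paths only manage a reference. */
static int mydev_enable(struct mydev *md)
{
        int ret = pm_runtime_get_sync(md->dev);

        if (ret < 0) {
                pm_runtime_put_noidle(md->dev);
                return ret;
        }
        return 0;
}

static void mydev_disable(struct mydev *md)
{
        pm_runtime_put(md->dev);        /* may trigger runtime suspend */
}

Wiring up happens through SET_RUNTIME_PM_OPS() in the driver's dev_pm_ops plus pm_runtime_enable() at probe, exactly as the hunks above do.
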
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 5ed8b1effe71..88ccc0469316 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -35,6 +35,7 @@
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <linux/hdmi.h>
#include <linux/component.h>
#include <linux/mfd/syscon.h>
@@ -133,6 +134,7 @@ struct hdmi_context {
struct regulator_bulk_data regul_bulk[ARRAY_SIZE(supply)];
struct regulator *reg_hdmi_en;
struct exynos_drm_clk phy_clk;
+ struct drm_bridge *bridge;
};
static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
@@ -509,9 +511,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
{
.pixel_clock = 27000000,
.conf = {
- 0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46,
- 0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
- 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x01, 0x51, 0x2d, 0x75, 0x01, 0x00, 0x88, 0x02,
+ 0x72, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
+ 0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
@@ -519,9 +521,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
.pixel_clock = 27027000,
.conf = {
0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3,
- 0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5,
- 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
- 0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ 0x71, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
+ 0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
},
},
{
@@ -587,6 +589,15 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40,
},
},
+ {
+ .pixel_clock = 297000000,
+ .conf = {
+ 0x01, 0x51, 0x3E, 0x05, 0x40, 0xF0, 0x88, 0xC2,
+ 0x52, 0x53, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+ 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
};
static const char * const hdmi_clk_gates4[] = {
@@ -788,7 +799,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
sizeof(buf));
if (ret > 0) {
hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC);
- hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, ret);
+ hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, 3);
+ hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3);
}
ret = hdmi_audio_infoframe_init(&frm.audio);
@@ -912,7 +924,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
+ if (hdata->bridge) {
+ encoder->bridge = hdata->bridge;
+ hdata->bridge->encoder = encoder;
+ ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
+ if (ret)
+ DRM_ERROR("Failed to attach bridge\n");
+ }
+
+ return ret;
}
static bool hdmi_mode_fixup(struct drm_encoder *encoder,
@@ -1581,6 +1601,31 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
hdmiphy_disable(hdata);
}
+static int hdmi_bridge_init(struct hdmi_context *hdata)
+{
+ struct device *dev = hdata->dev;
+ struct device_node *ep, *np;
+
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
+ if (!ep)
+ return 0;
+
+ np = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!np) {
+ DRM_ERROR("failed to get remote port parent");
+ return -EINVAL;
+ }
+
+ hdata->bridge = of_drm_find_bridge(np);
+ of_node_put(np);
+
+ if (!hdata->bridge)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
static int hdmi_resources_init(struct hdmi_context *hdata)
{
struct device *dev = hdata->dev;
@@ -1620,17 +1665,18 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
- if (PTR_ERR(hdata->reg_hdmi_en) == -ENODEV)
- return 0;
+ if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
+ if (IS_ERR(hdata->reg_hdmi_en))
+ return PTR_ERR(hdata->reg_hdmi_en);
- if (IS_ERR(hdata->reg_hdmi_en))
- return PTR_ERR(hdata->reg_hdmi_en);
-
- ret = regulator_enable(hdata->reg_hdmi_en);
- if (ret)
- DRM_ERROR("failed to enable hdmi-en regulator\n");
+ ret = regulator_enable(hdata->reg_hdmi_en);
+ if (ret) {
+ DRM_ERROR("failed to enable hdmi-en regulator\n");
+ return ret;
+ }
+ }
- return ret;
+ return hdmi_bridge_init(hdata);
}
static struct of_device_id hdmi_match_types[] = {
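
hdmi_bridge_init() above is the usual OF-graph walk: find the endpoint on the given port, hop to the remote port's parent node, and look that node up in the bridge registry, deferring probe until the bridge driver has bound. A hedged sketch of that lookup in isolation:

#include <linux/err.h>
#include <linux/of_graph.h>
#include <drm/drm_bridge.h>

static struct drm_bridge *find_remote_bridge(struct device_node *node)
{
        struct device_node *ep, *remote;
        struct drm_bridge *bridge;

        /* Port 1, any endpoint (reg == -1). */
        ep = of_graph_get_endpoint_by_regs(node, 1, -1);
        if (!ep)
                return NULL;    /* nothing wired up in DT */

        remote = of_graph_get_remote_port_parent(ep);
        of_node_put(ep);
        if (!remote)
                return ERR_PTR(-EINVAL);

        bridge = of_drm_find_bridge(remote);
        of_node_put(remote);

        /* NULL means the bridge driver has not probed yet. */
        return bridge ?: ERR_PTR(-EPROBE_DEFER);
}
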
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index edb20a34c66c..72143ac10525 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -485,7 +485,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
bool crcb_mode = false;
u32 val;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_NV12:
crcb_mode = false;
break;
@@ -494,7 +494,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
break;
default:
DRM_ERROR("pixel format for vp is wrong [%d].\n",
- fb->pixel_format);
+ fb->format->format);
return;
}
@@ -597,7 +597,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
unsigned int fmt;
u32 val;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
fmt = MXR_FORMAT_ARGB4444;
@@ -631,7 +631,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
/* converting dma address base and source offset */
dma_addr = exynos_drm_fb_dma_addr(fb, 0)
- + (state->src.x * fb->bits_per_pixel >> 3)
+ + (state->src.x * fb->format->cpp[0])
+ (state->src.y * fb->pitches[0]);
src_x_offset = 0;
src_y_offset = 0;
@@ -649,7 +649,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
/* setup geometry */
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
- fb->pitches[0] / (fb->bits_per_pixel >> 3));
+ fb->pitches[0] / fb->format->cpp[0]);
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@@ -681,7 +681,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
mixer_cfg_scan(ctx, mode->vdisplay);
mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
mixer_cfg_layer(ctx, win, priority, true);
- mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->pixel_format));
+ mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->format->format));
/* layer update mandatory for mixer 16.0.33.0 */
if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
@@ -701,7 +701,7 @@ static void vp_win_reset(struct mixer_context *ctx)
unsigned int tries = 100;
vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
- while (tries--) {
+ while (--tries) {
/* waiting until VP_SRESET_PROCESSING is 0 */
if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
break;
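
The one-character change from "while (tries--)" to "while (--tries)" in vp_win_reset() is easy to miss: with the post-decrement form the unsigned counter wraps to UINT_MAX on exhaustion, so a zero test after the loop can never report the timeout. A standalone sketch of the corrected idiom, with ready() as a hypothetical hardware poll:

#include <linux/delay.h>
#include <linux/printk.h>
#include <linux/types.h>

static void wait_for_reset(bool (*ready)(void))
{
        unsigned int tries = 100;

        while (--tries) {       /* pre-decrement: tries is 0 on timeout */
                if (ready())
                        break;
                usleep_range(10, 20);
        }

        /* With "while (tries--)" the counter would have wrapped to
         * UINT_MAX here, hiding the timeout from this check. */
        if (!tries)
                pr_warn("reset timed out\n");
}
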
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 537ca159ffe5..04173235f448 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -94,7 +94,7 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
"Invalid legacyfb_depth. Defaulting to 24bpp\n");
legacyfb_depth = 24;
}
- fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1, 1);
+ fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
if (IS_ERR(fsl_dev->fbdev)) {
ret = PTR_ERR(fsl_dev->fbdev);
fsl_dev->fbdev = NULL;
@@ -116,7 +116,7 @@ done:
return ret;
}
-static int fsl_dcu_unload(struct drm_device *dev)
+static void fsl_dcu_unload(struct drm_device *dev)
{
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
@@ -131,8 +131,6 @@ static int fsl_dcu_unload(struct drm_device *dev)
drm_irq_uninstall(dev);
dev->dev_private = NULL;
-
- return 0;
}
static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
@@ -415,10 +413,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
if (ret < 0)
goto unref;
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
- driver->major, driver->minor, driver->patchlevel,
- driver->date, drm->primary->index);
-
return 0;
unref:
@@ -434,7 +428,8 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
{
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
- drm_put_dev(fsl_dev->drm);
+ drm_dev_unregister(fsl_dev->drm);
+ drm_dev_unref(fsl_dev->drm);
clk_disable_unprepare(fsl_dev->clk);
clk_unregister(fsl_dev->pix_clk);
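
The fsl-dcu remove path above also shows the preferred two-step teardown: unregister the device from userspace first, then drop the final reference, instead of the drm_put_dev() shorthand. A sketch with hypothetical driver-data names:

#include <linux/platform_device.h>
#include <drm/drmP.h>

static int mydrv_remove(struct platform_device *pdev)
{
        struct drm_device *drm = platform_get_drvdata(pdev);

        drm_dev_unregister(drm);        /* cut off userspace access */
        /* driver-specific teardown may still touch drm here */
        drm_dev_unref(drm);             /* final reference frees it */
        return 0;
}
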
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index e9e9aeecf2eb..da9bfd432ca6 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -12,6 +12,8 @@
#ifndef __FSL_DCU_DRM_DRV_H__
#define __FSL_DCU_DRM_DRV_H__
+#include <drm/drm_encoder.h>
+
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_output.h"
#include "fsl_dcu_drm_plane.h"
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index a99f48847420..0a20723aa6e1 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -44,7 +44,7 @@ static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
if (!state->fb || !state->crtc)
return 0;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
@@ -96,7 +96,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
gem = drm_fb_cma_get_gem_obj(fb, 0);
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_RGB565:
bpp = FSL_DCU_RGB565;
break;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 05a8ee106879..c3651456c963 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -160,10 +160,7 @@ static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev,
if (!bridge)
return -ENODEV;
- fsl_dev->encoder.bridge = bridge;
- bridge->encoder = &fsl_dev->encoder;
-
- return drm_bridge_attach(fsl_dev->drm, bridge);
+ return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
}
int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index 3194e544ee27..b3d70a63c5a3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -72,10 +72,8 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
return NULL;
tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
- if (!tcon) {
- ret = -ENOMEM;
+ if (!tcon)
goto err_node_put;
- }
ret = fsl_tcon_init_regmap(dev, tcon, np);
if (ret) {
@@ -89,9 +87,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
goto err_node_put;
}
- of_node_put(np);
- clk_prepare_enable(tcon->ipg_clk);
+ ret = clk_prepare_enable(tcon->ipg_clk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable the TCON clock\n");
+ goto err_node_put;
+ }
+ of_node_put(np);
dev_info(dev, "Using TCON in bypass mode\n");
return tcon;
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 8906d67494fc..df11582f1efc 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,6 +1,6 @@
config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
- depends on DRM && PCI && X86
+ depends on DRM && PCI && X86 && MMU
select DRM_KMS_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index 0d2bb1682508..c51d9259c7a7 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -254,7 +254,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
offset = psbfb->gtt->offset;
stride = fb->pitches[0];
- switch (fb->depth) {
+ switch (fb->format->depth) {
case 8:
src_format = PSB_2D_SRC_332RGB;
dst_format = PSB_2D_DST_332RGB;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8b44fa542562..da42d2e1d397 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -77,7 +77,7 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
(transp << info->var.transp.offset);
if (regno < 16) {
- switch (fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 16:
((uint32_t *) info->pseudo_palette)[regno] = v;
break;
@@ -244,7 +244,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
if (ret) {
@@ -407,7 +407,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
fbdev->psb_fb_helper.fb = fb;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
strcpy(info->fix.id, "psbdrmfb");
info->flags = FBINFO_DEFAULT;
@@ -564,7 +564,7 @@ int psb_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
- dev_priv->ops->crtcs, INTELFB_CONN_LIMIT);
+ INTELFB_CONN_LIMIT);
if (ret)
goto free;
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 1a1cf7a3b5ef..d1c5642b1c1e 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -59,7 +59,8 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -70,7 +71,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
/* no fb bound */
- if (!crtc->primary->fb) {
+ if (!fb) {
dev_err(dev->dev, "No FB bound\n");
goto gma_pipe_cleaner;
}
@@ -81,19 +82,19 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret < 0)
goto gma_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
+ offset = y * fb->pitches[0] + x * fb->format->cpp[0];
- REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
+ REG_WRITE(map->stride, fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->primary->fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
- if (crtc->primary->fb->depth == 15)
+ if (fb->format->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 92e3f93ee682..63c6e08600ae 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -148,7 +148,7 @@ static int check_fb(struct drm_framebuffer *fb)
if (!fb)
return 0;
- switch (fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
case 16:
case 24:
@@ -165,8 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -178,12 +179,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
/* no fb bound */
- if (!crtc->primary->fb) {
+ if (!fb) {
dev_dbg(dev->dev, "No FB bound\n");
return 0;
}
- ret = check_fb(crtc->primary->fb);
+ ret = check_fb(fb);
if (ret)
return ret;
@@ -196,18 +197,18 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
start = psbfb->gtt->offset;
- offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
+ offset = y * fb->pitches[0] + x * fb->format->cpp[0];
- REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
+ REG_WRITE(map->stride, fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->primary->fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
- if (crtc->primary->fb->depth == 15)
+ if (fb->format->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index da9fd34b9550..0fff269d3fe6 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -599,7 +599,8 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -608,7 +609,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int ret = 0;
/* no fb bound */
- if (!crtc->primary->fb) {
+ if (!fb) {
dev_dbg(dev->dev, "No FB bound\n");
return 0;
}
@@ -617,19 +618,19 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
return 0;
start = psbfb->gtt->offset;
- offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8);
+ offset = y * fb->pitches[0] + x * fb->format->cpp[0];
- REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);
+ REG_WRITE(map->stride, fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->primary->fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
- if (crtc->primary->fb->depth == 15)
+ if (fb->format->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index ff37ea585664..5ee93ff55608 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -159,7 +159,7 @@ static int psb_do_init(struct drm_device *dev)
return 0;
}
-static int psb_driver_unload(struct drm_device *dev)
+static void psb_driver_unload(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -220,7 +220,6 @@ static int psb_driver_unload(struct drm_device *dev)
dev->dev_private = NULL;
}
gma_power_uninit(dev);
- return 0;
}
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
@@ -407,11 +406,6 @@ out_err:
return ret;
}
-static int psb_driver_device_is_agp(struct drm_device *dev)
-{
- return 0;
-}
-
static inline void get_brightness(struct backlight_device *bd)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -488,7 +482,6 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
- .device_is_agp = psb_driver_device_is_agp,
.irq_preinstall = psb_irq_preinstall,
.irq_postinstall = psb_irq_postinstall,
.irq_uninstall = psb_irq_uninstall,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 2a3b7c684db2..6a10215fc42d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -23,6 +23,7 @@
#include <linux/i2c-algo-bit.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include <linux/gpio.h>
#include "gma_display.h"
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 380622a0da35..c7129dc3bdfc 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,6 +1,6 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 2a1386e33126..c655883d3613 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -122,11 +122,11 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
- reg = state->fb->width * (state->fb->bits_per_pixel / 8);
+ reg = state->fb->width * (state->fb->format->cpp[0]);
/* now line_pad is 16 */
reg = PADDING(16, reg);
- line_l = state->fb->width * state->fb->bits_per_pixel / 8;
+ line_l = state->fb->width * state->fb->format->cpp[0];
line_l = PADDING(16, line_l);
writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
@@ -136,7 +136,7 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK;
reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT,
- state->fb->bits_per_pixel / 16);
+ state->fb->format->cpp[0] * 8 / 16);
writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index 9b0696735ba1..d7a4d9095b33 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -121,7 +121,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
if (IS_ERR(hi_fbdev->fb)) {
- ret = PTR_ERR(info);
+ ret = PTR_ERR(hi_fbdev->fb);
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
goto out_release_fbi;
}
@@ -135,7 +135,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
info->fbops = &hibmc_drm_fb_ops;
drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
- hi_fbdev->fb->fb.depth);
+ hi_fbdev->fb->fb.format->depth);
drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
sizes->fb_height);
@@ -200,8 +200,7 @@ int hibmc_fbdev_init(struct hibmc_drm_private *priv)
&hibmc_fbdev_helper_funcs);
/* Now just one crtc and one channel */
- ret = drm_fb_helper_init(priv->dev,
- &hifbdev->helper, 1, 1);
+ ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1);
if (ret) {
DRM_ERROR("failed to initialize fb helper: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index e76abf61edae..20732b62d4c9 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -243,8 +243,6 @@ struct ttm_bo_driver hibmc_bo_driver = {
.verify_access = hibmc_bo_verify_access,
.io_mem_reserve = &hibmc_ttm_io_mem_reserve,
.io_mem_free = NULL,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int hibmc_mm_init(struct hibmc_drm_private *hibmc)
@@ -512,7 +510,7 @@ hibmc_framebuffer_init(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- drm_helper_mode_fill_fb_struct(&hibmc_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &hibmc_fb->fb, mode_cmd);
hibmc_fb->obj = obj;
ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 998452ad0fcb..1737e98bc10a 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -709,10 +709,7 @@ static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
int ret;
/* associate the bridge to dsi encoder */
- encoder->bridge = bridge;
- bridge->encoder = encoder;
-
- ret = drm_bridge_attach(dev, bridge);
+ ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
DRM_ERROR("failed to attach external bridge\n");
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index afc2b5d2d5f0..9a0678a33e0d 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -304,8 +304,8 @@ static void ade_set_medianoc_qos(struct ade_crtc *acrtc)
static int ade_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct kirin_drm_private *priv = dev->dev_private;
- struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]);
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
void __iomem *base = ctx->base;
@@ -320,8 +320,8 @@ static int ade_enable_vblank(struct drm_device *dev, unsigned int pipe)
static void ade_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct kirin_drm_private *priv = dev->dev_private;
- struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]);
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
void __iomem *base = ctx->base;
@@ -575,7 +575,6 @@ static const struct drm_crtc_funcs ade_crtc_funcs = {
static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *plane)
{
- struct kirin_drm_private *priv = dev->dev_private;
struct device_node *port;
int ret;
@@ -599,7 +598,6 @@ static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
}
drm_crtc_helper_add(crtc, &ade_crtc_helper_funcs);
- priv->crtc[drm_crtc_index(crtc)] = crtc;
return 0;
}
@@ -617,7 +615,7 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
ch + 1, y, in_h, stride, (u32)obj->paddr);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
addr, fb->width, fb->height, fmt,
- drm_get_format_name(fb->pixel_format, &format_name));
+ drm_get_format_name(fb->format->format, &format_name));
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
@@ -773,7 +771,7 @@ static void ade_update_channel(struct ade_plane *aplane,
{
struct ade_hw_ctx *ctx = aplane->ctx;
void __iomem *base = ctx->base;
- u32 fmt = ade_get_format(fb->pixel_format);
+ u32 fmt = ade_get_format(fb->format->format);
u32 ch = aplane->ch;
u32 in_w;
u32 in_h;
@@ -835,7 +833,7 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
if (!crtc || !fb)
return 0;
- fmt = ade_get_format(fb->pixel_format);
+ fmt = ade_get_format(fb->format->format);
if (fmt == ADE_FORMAT_UNSUPPORT)
return -EINVAL;
@@ -973,9 +971,9 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
return 0;
}
-static int ade_drm_init(struct drm_device *dev)
+static int ade_drm_init(struct platform_device *pdev)
{
- struct platform_device *pdev = dev->platformdev;
+ struct drm_device *dev = platform_get_drvdata(pdev);
struct ade_data *ade;
struct ade_hw_ctx *ctx;
struct ade_crtc *acrtc;
@@ -1034,13 +1032,8 @@ static int ade_drm_init(struct drm_device *dev)
return 0;
}
-static void ade_drm_cleanup(struct drm_device *dev)
+static void ade_drm_cleanup(struct platform_device *pdev)
{
- struct platform_device *pdev = dev->platformdev;
- struct ade_data *ade = platform_get_drvdata(pdev);
- struct drm_crtc *crtc = &ade->acrtc.base;
-
- drm_crtc_cleanup(crtc);
}
const struct kirin_dc_ops ade_dc_ops = {
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index ebd5f4fe4c23..7ec93aec7e88 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -42,7 +42,7 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
#endif
drm_kms_helper_poll_fini(dev);
drm_vblank_cleanup(dev);
- dc_ops->cleanup(dev);
+ dc_ops->cleanup(to_platform_device(dev->dev));
drm_mode_config_cleanup(dev);
devm_kfree(dev->dev, priv);
dev->dev_private = NULL;
@@ -59,8 +59,7 @@ static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(priv->fbdev);
} else {
priv->fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
+ dev->mode_config.num_connector);
if (IS_ERR(priv->fbdev))
priv->fbdev = NULL;
}
@@ -104,7 +103,7 @@ static int kirin_drm_kms_init(struct drm_device *dev)
kirin_drm_mode_config_init(dev);
/* display controller init */
- ret = dc_ops->init(dev);
+ ret = dc_ops->init(to_platform_device(dev->dev));
if (ret)
goto err_mode_config_cleanup;
@@ -138,7 +137,7 @@ static int kirin_drm_kms_init(struct drm_device *dev)
err_unbind_all:
component_unbind_all(dev->dev, dev);
err_dc_cleanup:
- dc_ops->cleanup(dev);
+ dc_ops->cleanup(to_platform_device(dev->dev));
err_mode_config_cleanup:
drm_mode_config_cleanup(dev);
devm_kfree(dev->dev, priv);
@@ -209,8 +208,6 @@ static int kirin_drm_bind(struct device *dev)
if (IS_ERR(drm_dev))
return PTR_ERR(drm_dev);
- drm_dev->platformdev = to_platform_device(dev);
-
ret = kirin_drm_kms_init(drm_dev);
if (ret)
goto err_drm_dev_unref;
@@ -219,10 +216,6 @@ static int kirin_drm_bind(struct device *dev)
if (ret)
goto err_kms_cleanup;
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, drm_dev->primary->index);
-
return 0;
err_kms_cleanup:
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 1a07caf8e7f4..7f60c64915d9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -15,12 +15,11 @@
/* display controller init/cleanup ops */
struct kirin_dc_ops {
- int (*init)(struct drm_device *dev);
- void (*cleanup)(struct drm_device *dev);
+ int (*init)(struct platform_device *pdev);
+ void (*cleanup)(struct platform_device *pdev);
};
struct kirin_drm_private {
- struct drm_crtc *crtc[MAX_CRTC];
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_fbdev_cma *fbdev;
#endif
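
The kirin ops change follows from dropping drm_device->platformdev: code that needs the platform device now derives it from the generic struct device pointer, as the calls above do. A one-line sketch of the derivation:

#include <linux/platform_device.h>
#include <drm/drmP.h>

static struct platform_device *drm_to_pdev(struct drm_device *drm)
{
        /* drm->dev is the underlying struct device; to_platform_device()
         * is a container_of() cast back to the platform device. */
        return to_platform_device(drm->dev);
}
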
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ab4e6cbe1f8b..576a417690d4 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1190,6 +1190,14 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
+ dev->agp = drm_agp_init(dev);
+ if (dev->agp) {
+ dev->agp->agp_mtrr = arch_phys_wc_add(
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024);
+ }
+
/* Our userspace depends upon the agp mapping support. */
if (!dev->agp)
return -EINVAL;
@@ -1249,19 +1257,3 @@ const struct drm_ioctl_desc i810_ioctls[] = {
};
int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
-
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always retured to indictate every i810 is AGP.
- */
-int i810_driver_device_is_agp(struct drm_device *dev)
-{
- return 1;
-}
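
With the .device_is_agp hook removed, a driver that requires AGP initialises it explicitly during load, exactly as the i810 hunk above now does. A hedged sketch of that probe-time pattern:

#include <linux/io.h>
#include <drm/drmP.h>
#include <drm/drm_agpsupport.h>

static int mydrv_load(struct drm_device *dev, unsigned long flags)
{
        /* drm_agp_init() returns NULL when no AGP bridge is present. */
        dev->agp = drm_agp_init(dev);
        if (!dev->agp)
                return -EINVAL; /* userspace depends on the AGP mapping */

        /* Mark the aperture write-combining for CPU access. */
        dev->agp->agp_mtrr = arch_phys_wc_add(dev->agp->agp_info.aper_base,
                                              dev->agp->agp_info.aper_size *
                                              1024 * 1024);
        return 0;
}
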
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 02504a7cfaf2..37fd0906f807 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -60,7 +60,6 @@ static struct drm_driver driver = {
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
.set_busid = drm_pci_set_busid,
- .device_is_agp = i810_driver_device_is_agp,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
.fops = &i810_driver_fops,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 93ec5dc4e7d3..c73d2f2da57b 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -124,7 +124,6 @@ extern int i810_driver_load(struct drm_device *, unsigned long flags);
extern void i810_driver_lastclose(struct drm_device *dev);
extern void i810_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern int i810_driver_device_is_agp(struct drm_device *dev);
extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern const struct drm_ioctl_desc i810_ioctls[];
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 51ba630a134b..597648c7a645 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -19,9 +19,12 @@ config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
select PREEMPT_COUNT
+ select I2C_CHARDEV
+ select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
+ select DRM_I915_SW_FENCE_DEBUG_OBJECTS
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -43,3 +46,15 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_SW_FENCE_DEBUG_OBJECTS
+ bool "Enable additional driver debugging for fence objects"
+ depends on DRM_I915
+ select DEBUG_OBJECTS
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 78711dddd937..c62ab45683c0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -24,7 +24,7 @@ i915-y := i915_drv.o \
intel_runtime_pm.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
# GEM code
i915-y += i915_cmd_parser.o \
@@ -55,7 +55,10 @@ i915-y += i915_cmd_parser.o \
intel_uncore.o
# general-purpose microcontroller (GuC) support
-i915-y += intel_guc_loader.o \
+i915-y += intel_uc.o \
+ intel_guc_log.o \
+ intel_guc_loader.o \
+ intel_huc.o \
i915_guc_submission.o
# autogenerated null render state
@@ -117,6 +120,10 @@ i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
# virtual gpu code
i915-y += i915_vgpu.o
+# perf code
+i915-y += i915_perf.o \
+ i915_oa_hsw.o
+
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
include $(src)/gvt/Makefile
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index f7bce8603958..3b6caaca9751 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,44 +41,35 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- u32 alloc_flag, search_flag;
+ unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
- int retried = 0;
int ret;
if (high_gm) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
node = &vgpu->gm.high_gm_node;
size = vgpu_hidden_sz(vgpu);
- start = gvt_hidden_gmadr_base(gvt);
- end = gvt_hidden_gmadr_end(gvt);
+ start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+ end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
+ flags = PIN_HIGH;
} else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
node = &vgpu->gm.low_gm_node;
size = vgpu_aperture_sz(vgpu);
- start = gvt_aperture_gmadr_base(gvt);
- end = gvt_aperture_gmadr_end(gvt);
+ start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+ end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
+ flags = PIN_MAPPABLE;
}
mutex_lock(&dev_priv->drm.struct_mutex);
-search_again:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
- node, size, 4096, 0,
- start, end, search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(&dev_priv->ggtt.base,
- size, 4096, 0, start, end, 0);
- if (ret == 0 && ++retried < 3)
- goto search_again;
-
- gvt_err("fail to alloc %s gm space from host, retried %d\n",
- high_gm ? "high" : "low", retried);
- }
+ ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+ size, I915_GTT_PAGE_SIZE,
+ I915_COLOR_UNEVICTABLE,
+ start, end, flags);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (ret)
+ gvt_err("fail to alloc %s gm space from host\n",
+ high_gm ? "high" : "low");
+
return ret;
}
@@ -264,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
if (request > avail)
goto no_enough_resource;
- vgpu_aperture_sz(vgpu) = request;
+ vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
item = "high GM space";
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
@@ -275,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
if (request > avail)
goto no_enough_resource;
- vgpu_hidden_sz(vgpu) = request;
+ vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
item = "fence";
max = gvt_fence_sz(gvt) - HOST_FENCE;
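
The aperture_gm.c rework above replaces the open-coded drm_mm search plus a manual eviction-and-retry loop with i915_gem_gtt_insert(), which handles searching, eviction and placement (PIN_HIGH for top-down, PIN_MAPPABLE for the CPU-visible range) internally. A sketch of the caller pattern, with names following the i915 internals used in the hunk:

#include "i915_drv.h"

static int reserve_ggtt_range(struct drm_i915_private *i915,
                              struct drm_mm_node *node,
                              u64 size, u64 start, u64 end, bool high)
{
        int ret;

        mutex_lock(&i915->drm.struct_mutex);
        ret = i915_gem_gtt_insert(&i915->ggtt.base, node,
                                  size, I915_GTT_PAGE_SIZE,
                                  I915_COLOR_UNEVICTABLE,
                                  start, end,
                                  high ? PIN_HIGH : PIN_MAPPABLE);
        mutex_unlock(&i915->drm.struct_mutex);

        return ret;
}
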
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index e4563984cb1e..b9c8e2407682 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1134,6 +1134,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
u32 dword2 = cmd_val(s, 2);
u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
+ info->plane = PRIMARY_PLANE;
+
switch (plane) {
case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
info->pipe = PIPE_A;
@@ -1147,12 +1149,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
info->pipe = PIPE_C;
info->event = PRIMARY_C_FLIP_DONE;
break;
+
+ case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
+ info->pipe = PIPE_A;
+ info->event = SPRITE_A_FLIP_DONE;
+ info->plane = SPRITE_PLANE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
+ info->pipe = PIPE_B;
+ info->event = SPRITE_B_FLIP_DONE;
+ info->plane = SPRITE_PLANE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
+ info->pipe = PIPE_C;
+ info->event = SPRITE_C_FLIP_DONE;
+ info->plane = SPRITE_PLANE;
+ break;
+
default:
gvt_err("unknown plane code %d\n", plane);
return -EINVAL;
}
- info->pipe = PRIMARY_PLANE;
info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
info->tile_val = (dword1 & GENMASK(2, 0));
info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
@@ -1598,7 +1616,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
return -ENOMEM;
entry_obj->obj =
- i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+ i915_gem_object_create(s->vgpu->gvt->dev_priv,
roundup(bb_size, PAGE_SIZE));
if (IS_ERR(entry_obj->obj)) {
ret = PTR_ERR(entry_obj->obj);
@@ -2661,14 +2679,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
- obj = i915_gem_object_create(dev,
+ obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index c0c884aeb30e..6d8fde880c39 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -83,7 +83,7 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
return 0;
}
-/* EDID with 1024x768 as its resolution */
+/* EDID with 1920x1200 as its resolution */
static unsigned char virtual_dp_monitor_edid[] = {
/*Header*/
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
@@ -97,11 +97,16 @@ static unsigned char virtual_dp_monitor_edid[] = {
0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
/* Established Timings: maximum resolution is 1024x768 */
0x21, 0x08, 0x00,
- /* Standard Timings. All invalid */
- 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
- /* 18 Byte Data Blocks 1: invalid */
- 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ /*
+ * Standard Timings.
+ * The following additional resolutions are supported:
+ * 1920x1080, 1280x720, 1280x960, 1280x1024,
+ * 1440x900, 1600x1200, 1680x1050
+ */
+ 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+ 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+ /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+ 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
/* 18 Byte Data Blocks 2: invalid */
0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
@@ -115,7 +120,7 @@ static unsigned char virtual_dp_monitor_edid[] = {
/* Extension Block Count */
0x00,
/* Checksum */
- 0xef,
+ 0x45,
};
#define DPCD_HEADER_SIZE 0xb
@@ -328,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu)
else
return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
}
+
+/**
+ * intel_vgpu_reset_display - reset vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset the vGPU virtual display emulation state.
+ *
+ */
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu)
+{
+ emulate_monitor_status_change(vgpu);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 7a60cb848268..8b234ea961f6 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 34083731669d..46eb9fd3c03f 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -487,7 +487,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- if (wa_ctx->indirect_ctx.size == 0)
+ if (!wa_ctx->indirect_ctx.obj)
return;
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 2fae2a2ca96f..1cb29b2d7dc6 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -48,31 +48,6 @@ struct gvt_firmware_header {
unsigned char data[1];
};
-#define RD(offset) (readl(mmio + offset.reg))
-#define WR(v, offset) (writel(v, mmio + offset.reg))
-
-static void bdw_forcewake_get(void __iomem *mmio)
-{
- WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
-
- RD(ECOBUS);
-
- if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
- gvt_err("fail to wait forcewake idle\n");
-
- WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
-
- if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
- gvt_err("fail to wait forcewake ack\n");
-
- if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
- GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
- gvt_err("fail to wait c0 wake up\n");
-}
-
-#undef RD
-#undef WR
-
#define dev_to_drm_minor(d) dev_get_drvdata((d))
static ssize_t
@@ -91,9 +66,9 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
-static int expose_firmware_sysfs(struct intel_gvt *gvt,
- void __iomem *mmio)
+static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
@@ -132,7 +107,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt,
for (j = 0; j < e->length; j += 4)
*(u32 *)(p + e->offset + j) =
- readl(mmio + e->offset + j);
+ I915_READ_NOTRACE(_MMIO(e->offset + j));
}
memcpy(gvt->firmware.mmio, p, info->mmio_size);
@@ -235,7 +210,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
struct gvt_firmware_header *h;
const struct firmware *fw;
char *path;
- void __iomem *mmio;
void *mem;
int ret;
@@ -260,17 +234,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->mmio = mem;
- mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
- if (!mmio) {
- kfree(path);
- kfree(firmware->cfg_space);
- kfree(firmware->mmio);
- return -EINVAL;
- }
-
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
- bdw_forcewake_get(mmio);
-
sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
pdev->revision);
@@ -300,13 +263,11 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
release_firmware(fw);
firmware->firmware_loaded = true;
- pci_iounmap(pdev, mmio);
return 0;
out_free_fw:
release_firmware(fw);
expose_firmware:
- expose_firmware_sysfs(gvt, mmio);
- pci_iounmap(pdev, mmio);
+ expose_firmware_sysfs(gvt);
return 0;
}
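The firmware snapshot no longer needs its own pci_iomap() of the MMIO BAR, nor the hand-rolled Broadwell forcewake sequence removed above: I915_READ_NOTRACE() goes through the driver's uncore layer, which takes forcewake as required. The before/after access pattern, condensed (sketch only, error handling omitted):

	/* before: raw BAR mapping, caller responsible for forcewake */
	void __iomem *mmio = pci_iomap(pdev, bar, size);
	u32 val = readl(mmio + offset);

	/* after: driver accessor, forcewake handled by the uncore layer */
	u32 val2 = I915_READ_NOTRACE(_MMIO(offset));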
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 47dec4acf7ff..28c92346db0e 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
static inline int init_shadow_page(struct intel_vgpu *vgpu,
struct intel_vgpu_shadow_page *p, int type)
{
+ struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
+
+ daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev, daddr)) {
+ gvt_err("fail to map dma addr\n");
+ return -EINVAL;
+ }
+
p->vaddr = page_address(p->page);
p->type = type;
INIT_HLIST_NODE(&p->node);
- p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
- if (p->mfn == INTEL_GVT_INVALID_ADDR)
- return -EFAULT;
-
+ p->mfn = daddr >> GTT_PAGE_SHIFT;
hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
return 0;
}
-static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+static inline void clean_shadow_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_shadow_page *p)
{
+ struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+
+ dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+ PCI_DMA_BIDIRECTIONAL);
+
if (!hlist_unhashed(&p->node))
hash_del(&p->node);
}
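init_shadow_page() and clean_shadow_page() now form a strict map/unmap pair, with the device IOVA stored in the existing mfn field as a frame number. The pairing in outline (assumes 4 KiB pages, i.e. GTT_PAGE_SHIFT == 12):

	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr))
		return -EINVAL;
	p->mfn = daddr >> GTT_PAGE_SHIFT;	/* IOVA kept as a frame number */

	/* teardown: rebuild the IOVA from the frame number and unmap */
	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);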
@@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
- clean_shadow_page(&spt->shadow_page);
+ clean_shadow_page(spt->vgpu, &spt->shadow_page);
intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
list_del_init(&spt->post_shadow_list);
@@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
int page_entry_num = GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
- unsigned long mfn;
int i;
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
@@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
return -ENOMEM;
}
- mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
- if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
- free_page((unsigned long)scratch_pt);
- return -EFAULT;
+ daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, daddr)) {
+ gvt_err("fail to dmamap scratch_pt\n");
+ __free_page(virt_to_page(scratch_pt));
+ return -ENOMEM;
}
- gtt->scratch_pt[type].page_mfn = mfn;
+ gtt->scratch_pt[type].page_mfn =
+ (unsigned long)(daddr >> GTT_PAGE_SHIFT);
gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
- vgpu->id, type, mfn);
+ vgpu->id, type, gtt->scratch_pt[type].page_mfn);
/* Build the tree by full filled the scratch pt with the entries which
* point to the next level scratch pt or scratch page. The
@@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
int i;
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL) {
+ daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
+ GTT_PAGE_SHIFT);
+ dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(vgpu->gtt.scratch_pt[i].page);
vgpu->gtt.scratch_pt[i].page = NULL;
vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page;
+ struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
gvt_err("fail to allocate scratch ggtt page\n");
return -ENOMEM;
}
- gvt->gtt.scratch_ggtt_page = virt_to_page(page);
- gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
- if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate scratch ggtt page\n");
- __free_page(gvt->gtt.scratch_ggtt_page);
- return -EFAULT;
+ daddr = dma_map_page(dev, virt_to_page(page), 0,
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, daddr)) {
+ gvt_err("fail to dmamap scratch ggtt page\n");
+ __free_page(virt_to_page(page));
+ return -ENOMEM;
}
+ gvt->gtt.scratch_ggtt_page = virt_to_page(page);
+ gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
@@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
+ struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
+ GTT_PAGE_SHIFT);
+
+ dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+
__free_page(gvt->gtt.scratch_ggtt_page);
if (enable_out_of_sync)
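One subtlety in these conversions: several sites, such as the scratch page teardown above, shift an unsigned long frame number left and only then cast to dma_addr_t. On a 32-bit kernel with 64-bit DMA addressing the shift can overflow before the cast takes effect. A hypothetical hardening (not in this patch) is to widen first:

	/* widen to dma_addr_t before shifting */
	dma_addr_t daddr = (dma_addr_t)page_mfn << GTT_PAGE_SHIFT;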
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index e6bf5c533fbe..3b9d59e457ba 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -68,8 +68,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
*/
int intel_gvt_init_host(void)
{
- int ret;
-
if (intel_gvt_host.initialized)
return 0;
@@ -96,11 +94,6 @@ int intel_gvt_init_host(void)
if (!intel_gvt_host.mpt)
return -EINVAL;
- /* Try to detect if we're running in host instead of VM. */
- ret = intel_gvt_hypervisor_detect_host();
- if (ret)
- return -ENODEV;
-
gvt_dbg_core("Running with hypervisor %s in host mode\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index ab2ea157da4c..1d450627ff65 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2167,7 +2167,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
- MMIO_D(OACONTROL, D_HSW);
+ MMIO_D(GEN7_OACONTROL, D_HSW);
MMIO_D(0x2b00, D_BDW_PLUS);
MMIO_D(0x2360, D_BDW_PLUS);
MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 30e543f5a703..df7f33abd393 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -38,7 +38,6 @@
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
- int (*detect_host)(void);
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index f7be02ac4be1..92bb247e3478 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
- u32 changed, masked, unmasked;
u32 imr = *(u32 *)p_data;
- gvt_dbg_irq("write IMR %x with val %x\n",
- reg, imr);
-
- gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
-
- /* figure out newly masked/unmasked bits */
- changed = vgpu_vreg(vgpu, reg) ^ imr;
- masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
- unmasked = masked ^ changed;
-
- gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
- changed, masked, unmasked);
+ gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
+ reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
vgpu_vreg(vgpu, reg) = imr;
ops->check_pending_irq(vgpu);
- gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+
return 0;
}
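The removed masked/unmasked bookkeeping reduces to XOR arithmetic, which the new one-line debug message computes inline: a bit differs between the old and new IMR value exactly when it is set in old ^ new. Illustrative values:

	u32 imr_old = 0x0000f00f, imr_new = 0x0000f0f0;
	u32 changed = imr_old ^ imr_new;	/* 0x000000ff */
	u32 newly_set = imr_new & changed;	/* 0x000000f0, newly masked */
	u32 newly_clear = imr_old & changed;	/* 0x0000000f, newly unmasked */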
@@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
- u32 changed, enabled, disabled;
u32 ier = *(u32 *)p_data;
u32 virtual_ier = vgpu_vreg(vgpu, reg);
- gvt_dbg_irq("write master irq reg %x with val %x\n",
- reg, ier);
-
- gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+ gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
+ reg, ier, virtual_ier, virtual_ier ^ ier);
/*
* GEN8_MASTER_IRQ is a special irq register,
@@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
vgpu_vreg(vgpu, reg) |= ier;
- /* figure out newly enabled/disable bits */
- changed = virtual_ier ^ ier;
- enabled = (virtual_ier & changed) ^ changed;
- disabled = enabled ^ changed;
-
- gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
- changed, enabled, disabled);
-
ops->check_pending_irq(vgpu);
- gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+
return 0;
}
@@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
- u32 changed, enabled, disabled;
u32 ier = *(u32 *)p_data;
- gvt_dbg_irq("write IER %x with val %x\n",
- reg, ier);
-
- gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+ gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
+ reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
- /* figure out newly enabled/disable bits */
- changed = vgpu_vreg(vgpu, reg) ^ ier;
- enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
- disabled = enabled ^ changed;
-
- gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
- changed, enabled, disabled);
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
@@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
update_upstream_irq(vgpu, info);
ops->check_pending_irq(vgpu);
- gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+
return 0;
}
@@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
- gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+ gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
+ reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
if (WARN_ON(!info))
return -EINVAL;
@@ -619,6 +588,10 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+ SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
}
/* GEN8 interrupt PCU events */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 3f656e3a6e5a..0f7f5d97f582 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
struct gvt_dma {
struct rb_node node;
gfn_t gfn;
- kvm_pfn_t pfn;
+ unsigned long iova;
};
static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+ unsigned long *iova)
+{
+ struct page *page;
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
+
+ page = pfn_to_page(pfn);
+ if (is_error_page(page))
+ return -EFAULT;
+
+ daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, daddr))
+ return -ENOMEM;
+
+ *iova = (unsigned long)(daddr >> PAGE_SHIFT);
+ return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
+
+ daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+ dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -111,21 +140,22 @@ out:
return ret;
}
-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct gvt_dma *entry;
- kvm_pfn_t pfn;
+ unsigned long iova;
mutex_lock(&vgpu->vdev.cache_lock);
entry = __gvt_cache_find(vgpu, gfn);
- pfn = (entry == NULL) ? 0 : entry->pfn;
+ iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
mutex_unlock(&vgpu->vdev.cache_lock);
- return pfn;
+ return iova;
}
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+ unsigned long iova)
{
struct gvt_dma *new, *itr;
struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
return;
new->gfn = gfn;
- new->pfn = pfn;
+ new->iova = iova;
mutex_lock(&vgpu->vdev.cache_lock);
while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
}
g1 = gfn;
+ gvt_dma_unmap_iova(vgpu, this->iova);
rc = vfio_unpin_pages(dev, &g1, 1);
WARN_ON(rc != 1);
__gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
mutex_lock(&vgpu->vdev.cache_lock);
while ((node = rb_first(&vgpu->vdev.cache))) {
dma = rb_entry(node, struct gvt_dma, node);
+ gvt_dma_unmap_iova(vgpu, dma->iova);
gfn = dma->gfn;
vfio_unpin_pages(dev, &gfn, 1);
@@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
sparse->areas[0].offset =
PAGE_ALIGN(vgpu_aperture_offset(vgpu));
sparse->areas[0].size = vgpu_aperture_sz(vgpu);
- if (!caps.buf) {
- kfree(caps.buf);
- caps.buf = NULL;
- caps.size = 0;
- }
break;
case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
@@ -1248,43 +1275,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
}
-static bool kvmgt_check_guest(void)
-{
- unsigned int eax, ebx, ecx, edx;
- char s[12];
- unsigned int *i;
-
- eax = KVM_CPUID_SIGNATURE;
- ebx = ecx = edx = 0;
-
- asm volatile ("cpuid"
- : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
- :
- : "cc", "memory");
- i = (unsigned int *)s;
- i[0] = ebx;
- i[1] = ecx;
- i[2] = edx;
-
- return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
-}
-
-/**
- * NOTE:
- * It's actually impossible to check if we are running in KVM host,
- * since the "KVM host" is simply native. So we only dectect guest here.
- */
-static int kvmgt_detect_host(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped) {
- gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
- return -ENODEV;
- }
-#endif
- return kvmgt_check_guest() ? -ENODEV : 0;
-}
-
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
struct intel_vgpu *itr;
@@ -1390,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
- unsigned long pfn;
+ unsigned long iova, pfn;
struct kvmgt_guest_info *info;
struct device *dev;
int rc;
@@ -1399,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle;
- pfn = gvt_cache_find(info->vgpu, gfn);
- if (pfn != 0)
- return pfn;
+ iova = gvt_cache_find(info->vgpu, gfn);
+ if (iova != INTEL_GVT_INVALID_ADDR)
+ return iova;
pfn = INTEL_GVT_INVALID_ADDR;
dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1410,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
return INTEL_GVT_INVALID_ADDR;
}
+ /* translate to a host IOVA so the GFX device can DMA to the page */
+ rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+ if (rc) {
+ gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+ vfio_unpin_pages(dev, &gfn, 1);
+ return INTEL_GVT_INVALID_ADDR;
+ }
- gvt_cache_add(info->vgpu, gfn, pfn);
- return pfn;
+ gvt_cache_add(info->vgpu, gfn, iova);
+ return iova;
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
@@ -1459,7 +1456,6 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
}
struct intel_gvt_mpt kvmgt_mpt = {
- .detect_host = kvmgt_detect_host,
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
.attach_vgpu = kvmgt_attach_vgpu,
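Taken together, kvmgt_gfn_to_pfn() now hands the caller a host IOVA rather than a raw pfn, built in three steps on a cache miss. In outline (names and the vfio_pin_pages() call as used in this file):

	iova = gvt_cache_find(info->vgpu, gfn);
	if (iova != INTEL_GVT_INVALID_ADDR)
		return iova;			/* 1. cache hit */

	/* 2. pin the guest page through vfio, yielding a host pfn */
	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);

	/* 3. dma-map the pinned page and cache the gfn -> iova binding */
	if (gvt_dma_map_iova(info->vgpu, pfn, &iova)) {
		vfio_unpin_pages(dev, &gfn, 1);
		return INTEL_GVT_INVALID_ADDR;
	}
	gvt_cache_add(info->vgpu, gfn, iova);
	return iova;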
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 1af5830c0a56..419353624c5a 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -44,18 +44,6 @@
*/
/**
- * intel_gvt_hypervisor_detect_host - check if GVT-g is running within
- * hypervisor host/privilged domain
- *
- * Returns:
- * Zero on success, -ENODEV if current kernel is running inside a VM
- */
-static inline int intel_gvt_hypervisor_detect_host(void)
-{
- return intel_gvt_host.mpt->detect_host();
-}
-
-/**
* intel_gvt_hypervisor_host_init - init GVT-g host side
*
* Returns:
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 44136b1f3aab..2b3a642284b6 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
}
}
+#define CTX_CONTEXT_CONTROL_VAL 0x03
+
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
u32 v;
int i, array_size;
+ u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+ u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+ u32 inhibit_mask =
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
mmio = gen9_render_mmio_list;
@@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
continue;
mmio->value = I915_READ(mmio->reg);
+
+ /*
+ * If this is an inhibit context, load the in-context mmio
+ * into HW by mmio write. Otherwise skip the write, since
+ * context restore will load it from the context image.
+ */
+ if (mmio->in_context &&
+ ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
+ i915.enable_execlists)
+ continue;
+
if (mmio->mask)
v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
else
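The inhibit test leans on the masked-register convention used by CTX_CONTEXT_CONTROL: the upper 16 bits of a write are a write-enable mask for the lower 16. Expanding the macro makes the double comparison clear (macro semantics as in i915_reg.h):

	/* _MASKED_BIT_ENABLE(bit) expands to ((bit) << 16) | (bit) */
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	/* true only when the context image both write-enables the bit
	 * (high half) and sets it (low half) */
	bool inhibited = (ctx_ctrl & inhibit_mask) == inhibit_mask;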
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 678b0be85376..06c9584ac5f0 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work)
vgpu_data = scheduler->current_vgpu->sched_data;
head = &vgpu_data->list;
} else {
- gvt_dbg_sched("no current vgpu search from q head\n");
head = &sched_data->runq_head;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e91885dffeff..d6b6d0efdd1a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
- shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+ shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+ shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
mutex_lock(&dev_priv->drm.struct_mutex);
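The desc_template fix replaces a plain assignment, which clobbered every other field in the template, with a read-modify-write of just the 2-bit addressing-mode field. The general pattern:

	/* clear the 2-bit field, then OR in the new value */
	desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	desc |= mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;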
@@ -456,7 +457,7 @@ static int workload_thread(void *priv)
}
complete:
- gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+ gvt_dbg_sched("will complete workload %p, status: %d\n",
workload, workload->status);
if (workload->req)
@@ -549,18 +550,10 @@ err:
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-
atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
- mutex_lock(&dev_priv->drm.struct_mutex);
-
- /* a little hacky to mark as ctx closed */
- vgpu->shadow_ctx->closed = true;
- i915_gem_context_put(vgpu->shadow_ctx);
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 7295bc8e12fb..95a97aa0051e 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -74,7 +74,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
unsigned int num_types;
- unsigned int i, low_avail;
+ unsigned int i, low_avail, high_avail;
unsigned int min_low;
/* vGPU type name is defined as GVTg_Vx_y which contains
@@ -89,9 +89,9 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
* to indicate how many vGPU instance can be created for this
* type.
*
- * Currently use static size here as we init type earlier..
*/
- low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+ low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
num_types = 4;
gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
@@ -106,7 +106,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].low_gm_size = min_low;
gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
gvt->types[i].fence = 4;
- gvt->types[i].max_instance = low_avail / min_low;
+ gvt->types[i].max_instance = min(low_avail / min_low,
+ high_avail / gvt->types[i].high_gm_size);
gvt->types[i].avail_instance = gvt->types[i].max_instance;
if (IS_GEN8(gvt->dev_priv))
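max_instance is now limited by whichever GM pool is exhausted first. Purely for illustration, with assumed (not platform-derived) numbers: low_avail = 384 MB, min_low = 64 MB, high_avail = 3072 MB and high_gm_size = 384 MB give min(384/64, 3072/384) = min(6, 8) = 6 instances.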
@@ -142,9 +143,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
/* Need to depend on maxium hw resource size but keep on
* static config for now.
*/
- low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+ low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
+ high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
gvt->gm.vgpu_allocated_high_gm_size;
fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
gvt->fence.vgpu_allocated_fence_num;
@@ -384,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
intel_vgpu_reset_resource(vgpu);
intel_vgpu_reset_mmio(vgpu);
populate_pvinfo_page(vgpu);
+ intel_vgpu_reset_display(vgpu);
if (dmlr)
intel_vgpu_reset_cfg_space(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f5039f4f988f..21b1cd917d81 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -86,6 +86,102 @@
* general bitmasking mechanism.
*/
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+ /*
+ * Flags describing how the command parser processes the command.
+ *
+ * CMD_DESC_FIXED: The command has a fixed length if this is set,
+ * a length mask if not set
+ * CMD_DESC_SKIP: The command is allowed but does not follow the
+ * standard length encoding for the opcode range in
+ * which it falls
+ * CMD_DESC_REJECT: The command is never allowed
+ * CMD_DESC_REGISTER: The command should be checked against the
+ * register whitelist for the appropriate ring
+ * CMD_DESC_MASTER: The command is allowed if the submitting process
+ * is the DRM master
+ */
+ u32 flags;
+#define CMD_DESC_FIXED (1<<0)
+#define CMD_DESC_SKIP (1<<1)
+#define CMD_DESC_REJECT (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK (1<<4)
+#define CMD_DESC_MASTER (1<<5)
+
+ /*
+ * The command's unique identification bits and the bitmask to get them.
+ * This isn't strictly the opcode field as defined in the spec and may
+ * also include type, subtype, and/or subop fields.
+ */
+ struct {
+ u32 value;
+ u32 mask;
+ } cmd;
+
+ /*
+ * The command's length. The command is either fixed length (i.e. does
+ * not include a length field) or has a length field mask. The flag
+ * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
+ * a length mask. All command entries in a command table must include
+ * length information.
+ */
+ union {
+ u32 fixed;
+ u32 mask;
+ } length;
+
+ /*
+ * Describes where to find a register address in the command to check
+ * against the ring's register whitelist. Only valid if flags has the
+ * CMD_DESC_REGISTER bit set.
+ *
+ * A non-zero step value implies that the command may access multiple
+ * registers in sequence (e.g. LRI), in that case step gives the
+ * distance in dwords between individual offset fields.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 step;
+ } reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+ /*
+ * Describes command checks where a particular dword is masked and
+ * compared against an expected value. If the command does not match
+ * the expected value, the parser rejects it. Only valid if flags has
+ * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+ * are valid.
+ *
+ * If the check specifies a non-zero condition_mask then the parser
+ * only performs the check when the bits specified by condition_mask
+ * are non-zero.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 expected;
+ u32 condition_offset;
+ u32 condition_mask;
+ } bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
+ */
+struct drm_i915_cmd_table {
+ const struct drm_i915_cmd_descriptor *table;
+ int count;
+};
+
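For a sense of how the descriptor fields combine, a hypothetical entry (not from the driver, which builds its tables with helper macros): a fixed-length register-writing command whose target register is validated against the engine whitelist:

	static const struct drm_i915_cmd_descriptor sample_desc = {
		.flags = CMD_DESC_FIXED | CMD_DESC_REGISTER,
		.cmd = { .value = 0x22000000, .mask = 0xffc00000 }, /* opcode bits */
		.length = { .fixed = 3 },	/* 3 dwords, no length field */
		.reg = { .offset = 1, .mask = 0x007ffffc, .step = 0 },
	};

	static const struct drm_i915_cmd_table sample_table = {
		.table = &sample_desc,
		.count = 1,
	};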
#define STD_MI_OPCODE_SHIFT (32 - 9)
#define STD_3D_OPCODE_SHIFT (32 - 16)
#define STD_2D_OPCODE_SHIFT (32 - 10)
@@ -450,7 +546,6 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
- REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
REG32(GEN7_3DPRIM_END_OFFSET),
@@ -559,7 +654,7 @@ static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
@@ -578,7 +673,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
@@ -601,7 +696,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
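All three get_cmd_length_mask() helpers drop the AND with INSTR_CLIENT_MASK: the client field sits in the topmost bits of the command header, so shifting right by INSTR_CLIENT_SHIFT already isolates it and the mask was redundant:

	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;	/* top bits, no mask needed */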
@@ -984,7 +1079,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
src = ERR_PTR(-ENODEV);
if (src_needs_clflush &&
- i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
+ i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
if (!IS_ERR(src)) {
i915_memcpy_from_wc(dst,
@@ -1036,32 +1131,10 @@ unpin_src:
return dst;
}
-/**
- * intel_engine_needs_cmd_parser() - should a given engine use software
- * command parsing?
- * @engine: the engine in question
- *
- * Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module parameter.
- *
- * Return: true if the engine requires software command parsing
- */
-bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
-{
- if (!engine->needs_cmd_parser)
- return false;
-
- if (!USES_PPGTT(engine->i915))
- return false;
-
- return (i915.enable_cmd_parser == 1);
-}
-
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length,
- const bool is_master,
- bool *oacontrol_set)
+ const bool is_master)
{
if (desc->flags & CMD_DESC_SKIP)
return true;
@@ -1099,31 +1172,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
}
/*
- * OACONTROL requires some special handling for
- * writes. We want to make sure that any batch which
- * enables OA also disables it before the end of the
- * batch. The goal is to prevent one process from
- * snooping on the perf data from another process. To do
- * that, we need to check the value that will be written
- * to the register. Hence, limit OACONTROL writes to
- * only MI_LOAD_REGISTER_IMM commands.
- */
- if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
- return false;
- }
-
- if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
- return false;
- }
-
- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
- *oacontrol_set = (cmd[offset + 1] != 0);
- }
-
- /*
* Check the value written to the register against the
* allowed mask/value pair given in the whitelist entry.
*/
@@ -1214,7 +1262,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
u32 *cmd, *batch_end;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
- bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
bool needs_clflush_after = false;
int ret = 0;
@@ -1270,20 +1317,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
break;
}
- if (!check_cmd(engine, desc, cmd, length, is_master,
- &oacontrol_set)) {
- ret = -EINVAL;
+ if (!check_cmd(engine, desc, cmd, length, is_master)) {
+ ret = -EACCES;
break;
}
cmd += length;
}
- if (oacontrol_set) {
- DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
- ret = -EINVAL;
- }
-
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
@@ -1313,7 +1354,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv, id) {
- if (intel_engine_needs_cmd_parser(engine)) {
+ if (engine->needs_cmd_parser) {
active = true;
break;
}
@@ -1333,6 +1374,11 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
* 5. GPGPU dispatch compute indirect registers.
* 6. TIMESTAMP register and Haswell CS GPR registers
* 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
+ * 8. Don't report check_cmd() failures as EINVAL errors to userspace;
+ * rely on the HW to NOOP disallowed commands as it would without
+ * the parser enabled.
+ * 9. Don't whitelist or handle oacontrol specially, as ownership
+ * for oacontrol state is moving to i915-perf.
*/
- return 7;
+ return 9;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 791bfc760075..fa69d72fdcb9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,19 +26,9 @@
*
*/
-#include <linux/seq_file.h>
-#include <linux/circ_buf.h>
-#include <linux/ctype.h>
#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/export.h>
#include <linux/list_sort.h>
-#include <asm/msr-index.h>
-#include <drm/drmP.h>
#include "intel_drv.h"
-#include "intel_ringbuffer.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -77,6 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev_priv);
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+ seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
@@ -168,8 +159,35 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size);
- if (i915_vma_is_ggtt(vma))
- seq_printf(m, ", type: %u", vma->ggtt_view.type);
+ if (i915_vma_is_ggtt(vma)) {
+ switch (vma->ggtt_view.type) {
+ case I915_GGTT_VIEW_NORMAL:
+ seq_puts(m, ", normal");
+ break;
+
+ case I915_GGTT_VIEW_PARTIAL:
+ seq_printf(m, ", partial [%08llx+%x]",
+ vma->ggtt_view.partial.offset << PAGE_SHIFT,
+ vma->ggtt_view.partial.size << PAGE_SHIFT);
+ break;
+
+ case I915_GGTT_VIEW_ROTATED:
+ seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
+ vma->ggtt_view.rotated.plane[0].width,
+ vma->ggtt_view.rotated.plane[0].height,
+ vma->ggtt_view.rotated.plane[0].stride,
+ vma->ggtt_view.rotated.plane[0].offset,
+ vma->ggtt_view.rotated.plane[1].width,
+ vma->ggtt_view.rotated.plane[1].height,
+ vma->ggtt_view.rotated.plane[1].stride,
+ vma->ggtt_view.rotated.plane[1].offset);
+ break;
+
+ default:
+ MISSING_CASE(vma->ggtt_view.type);
+ break;
+ }
+ }
if (vma->fence)
seq_printf(m, " , fence: %d%s",
vma->fence->id,
@@ -549,10 +567,10 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
if (work->flip_queued_req) {
struct intel_engine_cs *engine = work->flip_queued_req->engine;
- seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+ seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
work->flip_queued_req->global_seqno,
- atomic_read(&dev_priv->gt.global_timeline.next_seqno),
+ intel_engine_last_submit(engine),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
} else
@@ -686,7 +704,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = container_of(rb, typeof(*w), node);
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
@@ -946,7 +964,7 @@ i915_error_state_write(struct file *filp,
struct i915_error_state_file_priv *error_priv = filp->private_data;
DRM_DEBUG_DRIVER("Resetting error state\n");
- i915_destroy_error_state(error_priv->dev);
+ i915_destroy_error_state(error_priv->i915);
return cnt;
}
@@ -960,7 +978,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
if (!error_priv)
return -ENOMEM;
- error_priv->dev = &dev_priv->drm;
+ error_priv->i915 = dev_priv;
i915_error_state_get(&dev_priv->drm, error_priv);
@@ -988,8 +1006,8 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
ssize_t ret_count = 0;
int ret;
- ret = i915_error_state_buf_init(&error_str,
- to_i915(error_priv->dev), count, *pos);
+ ret = i915_error_state_buf_init(&error_str, error_priv->i915,
+ count, *pos);
if (ret)
return ret;
@@ -1026,7 +1044,7 @@ i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- *val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno);
+ *val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
return 0;
}
@@ -1108,7 +1126,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
int max_freq;
rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
} else {
@@ -1204,7 +1222,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
- max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
+ max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
@@ -1217,7 +1235,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
- max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
+ max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
@@ -1330,13 +1348,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine));
- seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
+ seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
- &dev_priv->gpu_error.missed_irq_rings)));
+ &dev_priv->gpu_error.missed_irq_rings)),
+ yesno(engine->hangcheck.stalled));
+
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = container_of(rb, typeof(*w), node);
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
@@ -1346,8 +1366,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
- seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
- seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
+ seq_printf(m, "\taction = %s(%d) %d ms ago\n",
+ hangcheck_action_to_str(engine->hangcheck.action),
+ engine->hangcheck.action,
+ jiffies_to_msecs(jiffies -
+ engine->hangcheck.action_timestamp));
if (engine->id == RCS) {
seq_puts(m, "\tinstdone read =\n");
@@ -1728,7 +1751,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
- else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+ else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
else if (IS_I915GM(dev_priv))
@@ -1873,8 +1896,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
fbdev_fb->base.height,
- fbdev_fb->base.depth,
- fbdev_fb->base.bits_per_pixel,
+ fbdev_fb->base.format->depth,
+ fbdev_fb->base.format->cpp[0] * 8,
fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
describe_obj(m, fbdev_fb->obj);
@@ -1891,8 +1914,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width,
fb->base.height,
- fb->base.depth,
- fb->base.bits_per_pixel,
+ fb->base.format->depth,
+ fb->base.format->cpp[0] * 8,
fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
describe_obj(m, fb->obj);
@@ -2329,10 +2352,40 @@ static int i915_llc(struct seq_file *m, void *data)
return 0;
}
+static int i915_huc_load_status_info(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+
+ if (!HAS_HUC_UCODE(dev_priv))
+ return 0;
+
+ seq_puts(m, "HuC firmware status:\n");
+ seq_printf(m, "\tpath: %s\n", huc_fw->path);
+ seq_printf(m, "\tfetch: %s\n",
+ intel_uc_fw_status_repr(huc_fw->fetch_status));
+ seq_printf(m, "\tload: %s\n",
+ intel_uc_fw_status_repr(huc_fw->load_status));
+ seq_printf(m, "\tversion wanted: %d.%d\n",
+ huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
+ seq_printf(m, "\tversion found: %d.%d\n",
+ huc_fw->major_ver_found, huc_fw->minor_ver_found);
+ seq_printf(m, "\theader: offset is %d; size = %d\n",
+ huc_fw->header_offset, huc_fw->header_size);
+ seq_printf(m, "\tuCode: offset is %d; size = %d\n",
+ huc_fw->ucode_offset, huc_fw->ucode_size);
+ seq_printf(m, "\tRSA: offset is %d; size = %d\n",
+ huc_fw->rsa_offset, huc_fw->rsa_size);
+
+ seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
+
+ return 0;
+}
+
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
u32 tmp, i;
if (!HAS_GUC_UCODE(dev_priv))
@@ -2340,15 +2393,15 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
seq_printf(m, "GuC firmware status:\n");
seq_printf(m, "\tpath: %s\n",
- guc_fw->guc_fw_path);
+ guc_fw->path);
seq_printf(m, "\tfetch: %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+ intel_uc_fw_status_repr(guc_fw->fetch_status));
seq_printf(m, "\tload: %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ intel_uc_fw_status_repr(guc_fw->load_status));
seq_printf(m, "\tversion wanted: %d.%d\n",
- guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+ guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
seq_printf(m, "\tversion found: %d.%d\n",
- guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+ guc_fw->major_ver_found, guc_fw->minor_ver_found);
seq_printf(m, "\theader: offset is %d; size = %d\n",
guc_fw->header_offset, guc_fw->header_size);
seq_printf(m, "\tuCode: offset is %d; size = %d\n",
@@ -2409,7 +2462,7 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
client->priority, client->ctx_index, client->proc_desc_offset);
seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
- client->doorbell_id, client->doorbell_offset, client->cookie);
+ client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
@@ -2429,47 +2482,41 @@ static void i915_guc_client_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_guc guc;
- struct i915_guc_client client = {};
+ const struct intel_guc *guc = &dev_priv->guc;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u64 total = 0;
+ u64 total;
- if (!HAS_GUC_SCHED(dev_priv))
+ if (!guc->execbuf_client) {
+ seq_printf(m, "GuC submission %s\n",
+ HAS_GUC_SCHED(dev_priv) ?
+ "disabled" :
+ "not supported");
return 0;
-
- if (mutex_lock_interruptible(&dev->struct_mutex))
- return 0;
-
- /* Take a local copy of the GuC data, so we can dump it at leisure */
- guc = dev_priv->guc;
- if (guc.execbuf_client)
- client = *guc.execbuf_client;
-
- mutex_unlock(&dev->struct_mutex);
+ }
seq_printf(m, "Doorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
- seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
+ seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
+ seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
- seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
- seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
- seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
- seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
- seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+ seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
+ seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
+ seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
+ seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
+ seq_printf(m, "GuC last action error code: %d\n", guc->action_err);
+ total = 0;
seq_printf(m, "\nGuC submissions:\n");
for_each_engine(engine, dev_priv, id) {
- u64 submissions = guc.submissions[id];
+ u64 submissions = guc->submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- engine->name, submissions, guc.last_seqno[id]);
+ engine->name, submissions, guc->last_seqno[id]);
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
- seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
- i915_guc_client_info(m, dev_priv, &client);
+ seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
+ i915_guc_client_info(m, dev_priv, guc->execbuf_client);
i915_guc_log_info(m, dev_priv);
@@ -2542,6 +2589,29 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
i915_guc_log_control_get, i915_guc_log_control_set,
"%lld\n");
+static const char *psr2_live_status(u32 val)
+{
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+
+ val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status))
+ return live_status[val];
+
+ return "unknown";
+}
+
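The lookup is bounds-checked against the table, so unknown hardware states print as "unknown" instead of indexing past the array. Illustrative use (value constructed by hand; the real mask and shift come from i915_reg.h):

	/* a state field of 3 selects live_status[3], i.e. "SLEEP" */
	const char *s = psr2_live_status(3 << EDP_PSR2_STATUS_STATE_SHIFT);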
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2567,9 +2637,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_DDI(dev_priv))
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
- else {
+ if (HAS_DDI(dev_priv)) {
+ if (dev_priv->psr.psr2_support)
+ enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ else
+ enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+ } else {
for_each_pipe(dev_priv, pipe) {
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
@@ -2613,6 +2686,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
+ if (dev_priv->psr.psr2_support) {
+ u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
+
+ seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
+ psr2, psr2_live_status(psr2));
+ }
mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
@@ -2872,6 +2951,20 @@ static void intel_dp_info(struct seq_file *m,
&intel_dp->aux);
}
+static void intel_dp_mst_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct intel_dp_mst_encoder *intel_mst =
+ enc_to_mst(&intel_encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+ intel_connector->port);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+}
+
static void intel_hdmi_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
@@ -2914,7 +3007,10 @@ static void intel_connector_info(struct seq_file *m,
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- intel_dp_info(m, intel_connector);
+ if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ intel_dp_mst_info(m, intel_connector);
+ else
+ intel_dp_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_LVDS:
if (intel_encoder->type == INTEL_OUTPUT_LVDS)
@@ -2938,7 +3034,7 @@ static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
{
u32 state;
- if (IS_845G(dev_priv) || IS_I865G(dev_priv))
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -3021,7 +3117,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
state = plane->state;
if (state->fb) {
- drm_get_format_name(state->fb->pixel_format, &format_name);
+ drm_get_format_name(state->fb->format->format,
+ &format_name);
} else {
sprintf(format_name.str, "N/A");
}
@@ -3059,7 +3156,7 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id);
- for (i = 0; i < SKL_NUM_SCALERS; i++) {
+ for (i = 0; i < num_scalers; i++) {
struct intel_scaler *sc =
&pipe_config->scaler_state.scalers[i];
@@ -3141,11 +3238,11 @@ static int i915_engine_info(struct seq_file *m, void *unused)
u64 addr;
seq_printf(m, "%s\n", engine->name);
- seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+ seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
- engine->hangcheck.score);
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
rcu_read_lock();
@@ -3251,7 +3348,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = container_of(rb, typeof(*w), node);
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
@@ -3341,14 +3438,14 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
- pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
+ pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
+ seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
seq_printf(m, " dpll_md: 0x%08x\n",
- pll->config.hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
+ pll->state.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
}
drm_modeset_unlock_all(dev);
@@ -3526,12 +3623,6 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
-struct pipe_crc_info {
- const char *name;
- struct drm_i915_private *dev_priv;
- enum pipe pipe;
-};
-
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3561,844 +3652,6 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
- return -ENODEV;
-
- spin_lock_irq(&pipe_crc->lock);
-
- if (pipe_crc->opened) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EBUSY; /* already open */
- }
-
- pipe_crc->opened = true;
- filep->private_data = inode->i_private;
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- spin_lock_irq(&pipe_crc->lock);
- pipe_crc->opened = false;
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-/* (6 fields, 8 chars each, space separated (5) + '\n') */
-#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
-/* account for \'0' */
-#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
-
-static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
-{
- assert_spin_locked(&pipe_crc->lock);
- return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR);
-}
-
-static ssize_t
-i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
- loff_t *pos)
-{
- struct pipe_crc_info *info = filep->private_data;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
- char buf[PIPE_CRC_BUFFER_LEN];
- int n_entries;
- ssize_t bytes_read;
-
- /*
- * Don't allow user space to provide buffers not big enough to hold
- * a line of data.
- */
- if (count < PIPE_CRC_LINE_LEN)
- return -EINVAL;
-
- if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
- return 0;
-
- /* nothing to read */
- spin_lock_irq(&pipe_crc->lock);
- while (pipe_crc_data_count(pipe_crc) == 0) {
- int ret;
-
- if (filep->f_flags & O_NONBLOCK) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EAGAIN;
- }
-
- ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
- pipe_crc_data_count(pipe_crc), pipe_crc->lock);
- if (ret) {
- spin_unlock_irq(&pipe_crc->lock);
- return ret;
- }
- }
-
- /* We now have one or more entries to read */
- n_entries = count / PIPE_CRC_LINE_LEN;
-
- bytes_read = 0;
- while (n_entries > 0) {
- struct intel_pipe_crc_entry *entry =
- &pipe_crc->entries[pipe_crc->tail];
-
- if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR) < 1)
- break;
-
- BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
- pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-
- bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
- "%8u %8x %8x %8x %8x %8x\n",
- entry->frame, entry->crc[0],
- entry->crc[1], entry->crc[2],
- entry->crc[3], entry->crc[4]);
-
- spin_unlock_irq(&pipe_crc->lock);
-
- if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
- return -EFAULT;
-
- user_buf += PIPE_CRC_LINE_LEN;
- n_entries--;
-
- spin_lock_irq(&pipe_crc->lock);
- }
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return bytes_read;
-}
-
-static const struct file_operations i915_pipe_crc_fops = {
- .owner = THIS_MODULE,
- .open = i915_pipe_crc_open,
- .read = i915_pipe_crc_read,
- .release = i915_pipe_crc_release,
-};
-
-static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
- {
- .name = "i915_pipe_A_crc",
- .pipe = PIPE_A,
- },
- {
- .name = "i915_pipe_B_crc",
- .pipe = PIPE_B,
- },
- {
- .name = "i915_pipe_C_crc",
- .pipe = PIPE_C,
- },
-};
-
-static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(minor->dev);
- struct dentry *ent;
- struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
-
- info->dev_priv = dev_priv;
- ent = debugfs_create_file(info->name, S_IRUGO, root, info,
- &i915_pipe_crc_fops);
- if (!ent)
- return -ENOMEM;
-
- return drm_add_fake_info_node(minor, ent, info);
-}
-
-static const char * const pipe_crc_sources[] = {
- "none",
- "plane1",
- "plane2",
- "pf",
- "pipe",
- "TV",
- "DP-B",
- "DP-C",
- "DP-D",
- "auto",
-};
-
-static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
-{
- BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
- return pipe_crc_sources[source];
-}
-
-static int display_crc_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- int i;
-
- for (i = 0; i < I915_MAX_PIPES; i++)
- seq_printf(m, "%c %s\n", pipe_name(i),
- pipe_crc_source_name(dev_priv->pipe_crc[i].source));
-
- return 0;
-}
-
-static int display_crc_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, display_crc_ctl_show, inode->i_private);
-}
-
-static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PIPE:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_encoder *encoder;
- struct intel_crtc *crtc;
- struct intel_digital_port *dig_port;
- int ret = 0;
-
- *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
- drm_modeset_lock_all(dev);
- for_each_intel_encoder(dev, encoder) {
- if (!encoder->base.crtc)
- continue;
-
- crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->pipe != pipe)
- continue;
-
- switch (encoder->type) {
- case INTEL_OUTPUT_TVOUT:
- *source = INTEL_PIPE_CRC_SOURCE_TV;
- break;
- case INTEL_OUTPUT_DP:
- case INTEL_OUTPUT_EDP:
- dig_port = enc_to_dig_port(&encoder->base);
- switch (dig_port->port) {
- case PORT_B:
- *source = INTEL_PIPE_CRC_SOURCE_DP_B;
- break;
- case PORT_C:
- *source = INTEL_PIPE_CRC_SOURCE_DP_C;
- break;
- case PORT_D:
- *source = INTEL_PIPE_CRC_SOURCE_DP_D;
- break;
- default:
- WARN(1, "nonexisting DP port %c\n",
- port_name(dig_port->port));
- break;
- }
- break;
- default:
- break;
- }
- }
- drm_modeset_unlock_all(dev);
-
- return ret;
-}
-
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- bool need_stable_symbols = false;
-
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PIPE:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * When the pipe CRC tap point is after the transcoders we need
- * to tweak symbol-level features to produce a deterministic series of
- * symbols for a given frame. We need to reset those features only once
- * a frame (instead of every nth symbol):
- * - DC-balance: used to ensure a better clock recovery from the data
- * link (SDVO)
- * - DisplayPort scrambling: used for EMI reduction
- */
- if (need_stable_symbols) {
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
-
- tmp |= DC_BALANCE_RESET_VLV;
- switch (pipe) {
- case PIPE_A:
- tmp |= PIPE_A_SCRAMBLE_RESET;
- break;
- case PIPE_B:
- tmp |= PIPE_B_SCRAMBLE_RESET;
- break;
- case PIPE_C:
- tmp |= PIPE_C_SCRAMBLE_RESET;
- break;
- default:
- return -EINVAL;
- }
- I915_WRITE(PORT_DFT2_G4X, tmp);
- }
-
- return 0;
-}
-
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- bool need_stable_symbols = false;
-
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PIPE:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
- break;
- case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * When the pipe CRC tap point is after the transcoders we need
- * to tweak symbol-level features to produce a deterministic series of
- * symbols for a given frame. We need to reset those features only once
- * a frame (instead of every nth symbol):
- * - DC-balance: used to ensure a better clock recovery from the data
- * link (SDVO)
- * - DisplayPort scrambling: used for EMI reduction
- */
- if (need_stable_symbols) {
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
-
- WARN_ON(!IS_G4X(dev_priv));
-
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
-
- if (pipe == PIPE_A)
- tmp |= PIPE_A_SCRAMBLE_RESET;
- else
- tmp |= PIPE_B_SCRAMBLE_RESET;
-
- I915_WRITE(PORT_DFT2_G4X, tmp);
- }
-
- return 0;
-}
-
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
-
- switch (pipe) {
- case PIPE_A:
- tmp &= ~PIPE_A_SCRAMBLE_RESET;
- break;
- case PIPE_B:
- tmp &= ~PIPE_B_SCRAMBLE_RESET;
- break;
- case PIPE_C:
- tmp &= ~PIPE_C_SCRAMBLE_RESET;
- break;
- default:
- return;
- }
- if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
- tmp &= ~DC_BALANCE_RESET_VLV;
- I915_WRITE(PORT_DFT2_G4X, tmp);
-
-}
-
-static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
-
- if (pipe == PIPE_A)
- tmp &= ~PIPE_A_SCRAMBLE_RESET;
- else
- tmp &= ~PIPE_B_SCRAMBLE_RESET;
- I915_WRITE(PORT_DFT2_G4X, tmp);
-
- if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
- }
-}
-
-static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PLANE1:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
- break;
- case INTEL_PIPE_CRC_SOURCE_PLANE2:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
- break;
- case INTEL_PIPE_CRC_SOURCE_PIPE:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
- bool enable)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
- struct intel_crtc_state *pipe_config;
- struct drm_atomic_state *state;
- int ret = 0;
-
- drm_modeset_lock_all(dev);
- state = drm_atomic_state_alloc(dev);
- if (!state) {
- ret = -ENOMEM;
- goto out;
- }
-
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
- pipe_config = intel_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- goto out;
- }
-
- pipe_config->pch_pfit.force_thru = enable;
- if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
- pipe_config->pch_pfit.enabled != enable)
- pipe_config->base.connectors_changed = true;
-
- ret = drm_atomic_commit(state);
-out:
- WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
- drm_modeset_unlock_all(dev);
- drm_atomic_state_put(state);
-}
-
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PF;
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PLANE1:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
- break;
- case INTEL_PIPE_CRC_SOURCE_PLANE2:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
- break;
- case INTEL_PIPE_CRC_SOURCE_PF:
- if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
-
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source source)
-{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- enum intel_display_power_domain power_domain;
- u32 val = 0; /* shut up gcc */
- int ret;
-
- if (pipe_crc->source == source)
- return 0;
-
- /* forbid changing the source without going back to 'none' */
- if (pipe_crc->source && source)
- return -EINVAL;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
- return -EIO;
- }
-
- if (IS_GEN2(dev_priv))
- ret = i8xx_pipe_crc_ctl_reg(&source, &val);
- else if (INTEL_GEN(dev_priv) < 5)
- ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
- else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
- ret = ilk_pipe_crc_ctl_reg(&source, &val);
- else
- ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
-
- if (ret != 0)
- goto out;
-
- /* none -> real source transition */
- if (source) {
- struct intel_pipe_crc_entry *entries;
-
- DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
- pipe_name(pipe), pipe_crc_source_name(source));
-
- entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
- sizeof(pipe_crc->entries[0]),
- GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- hsw_disable_ips(crtc);
-
- spin_lock_irq(&pipe_crc->lock);
- kfree(pipe_crc->entries);
- pipe_crc->entries = entries;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
- }
-
- pipe_crc->source = source;
-
- I915_WRITE(PIPE_CRC_CTL(pipe), val);
- POSTING_READ(PIPE_CRC_CTL(pipe));
-
- /* real source -> none transition */
- if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
- struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
-
- DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
- pipe_name(pipe));
-
- drm_modeset_lock(&crtc->base.mutex, NULL);
- if (crtc->base.state->active)
- intel_wait_for_vblank(dev_priv, pipe);
- drm_modeset_unlock(&crtc->base.mutex);
-
- spin_lock_irq(&pipe_crc->lock);
- entries = pipe_crc->entries;
- pipe_crc->entries = NULL;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
-
- kfree(entries);
-
- if (IS_G4X(dev_priv))
- g4x_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
-
- hsw_enable_ips(crtc);
- }
-
- ret = 0;
-
-out:
- intel_display_power_put(dev_priv, power_domain);
-
- return ret;
-}
-
-/*
- * Parse pipe CRC command strings:
- * command: wsp* object wsp+ name wsp+ source wsp*
- * object: 'pipe'
- * name: (A | B | C)
- * source: (none | plane1 | plane2 | pf)
- * wsp: (#0x20 | #0x9 | #0xA)+
- *
- * eg.:
- * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
- * "pipe A none" -> Stop CRC
- */
-static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
-{
- int n_words = 0;
-
- while (*buf) {
- char *end;
-
- /* skip leading white space */
- buf = skip_spaces(buf);
- if (!*buf)
- break; /* end of buffer */
-
- /* find end of word */
- for (end = buf; *end && !isspace(*end); end++)
- ;
-
- if (n_words == max_words) {
- DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
- max_words);
- return -EINVAL; /* ran out of words[] before bytes */
- }
-
- if (*end)
- *end++ = '\0';
- words[n_words++] = buf;
- buf = end;
- }
-
- return n_words;
-}
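
As a usage illustration of the command grammar documented above, a hypothetical userspace sketch (not part of the patch; the debugfs path assumes DRM minor 0):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_crc_source(const char *cmd)
{
	/* e.g. cmd = "pipe A plane1" to start, "pipe A none" to stop */
	int fd = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, cmd, strlen(cmd));
	close(fd);
	return ret < 0 ? -1 : 0;
}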
-
-enum intel_pipe_crc_object {
- PIPE_CRC_OBJECT_PIPE,
-};
-
-static const char * const pipe_crc_objects[] = {
- "pipe",
-};
-
-static int
-display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
- if (!strcmp(buf, pipe_crc_objects[i])) {
- *o = i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
-{
- const char name = buf[0];
-
- if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
- return -EINVAL;
-
- *pipe = name - 'A';
-
- return 0;
-}
-
-static int
-display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
- if (!strcmp(buf, pipe_crc_sources[i])) {
- *s = i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
- char *buf, size_t len)
-{
-#define N_WORDS 3
- int n_words;
- char *words[N_WORDS];
- enum pipe pipe;
- enum intel_pipe_crc_object object;
- enum intel_pipe_crc_source source;
-
- n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
- if (n_words != N_WORDS) {
- DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
- N_WORDS);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_object(words[0], &object) < 0) {
- DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
- DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_source(words[2], &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
- return -EINVAL;
- }
-
- return pipe_crc_set_source(dev_priv, pipe, source);
-}
-
-static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- char *tmpbuf;
- int ret;
-
- if (len == 0)
- return 0;
-
- if (len > PAGE_SIZE - 1) {
- DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
- PAGE_SIZE);
- return -E2BIG;
- }
-
- tmpbuf = kmalloc(len + 1, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
-
- if (copy_from_user(tmpbuf, ubuf, len)) {
- ret = -EFAULT;
- goto out;
- }
- tmpbuf[len] = '\0';
-
- ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
-
-out:
- kfree(tmpbuf);
- if (ret < 0)
- return ret;
-
- *offp += len;
- return len;
-}
-
-static const struct file_operations i915_display_crc_ctl_fops = {
- .owner = THIS_MODULE,
- .open = display_crc_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = display_crc_ctl_write
-};
-
static ssize_t i915_displayport_test_active_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
@@ -4446,9 +3699,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
* testing code, only accept an actual value of 1 here
*/
if (val == 1)
- intel_dp->compliance_test_active = 1;
+ intel_dp->compliance.test_active = 1;
else
- intel_dp->compliance_test_active = 0;
+ intel_dp->compliance.test_active = 0;
}
}
out:
@@ -4475,7 +3728,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
- if (intel_dp->compliance_test_active)
+ if (intel_dp->compliance.test_active)
seq_puts(m, "1");
else
seq_puts(m, "0");
@@ -4519,7 +3772,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
- seq_printf(m, "%lx", intel_dp->compliance_test_data);
+ seq_printf(m, "%lx", intel_dp->compliance.test_data.edid);
} else
seq_puts(m, "0");
}
@@ -4558,7 +3811,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
- seq_printf(m, "%02lx", intel_dp->compliance_test_type);
+ seq_printf(m, "%02lx", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
}
@@ -4957,7 +4210,7 @@ unlock:
if (val & DROP_FREED) {
synchronize_rcu();
- flush_work(&dev_priv->mm.free_work);
+ i915_gem_drain_freed_objects(dev_priv);
}
return ret;
@@ -5164,7 +4417,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
/* BXT has a single slice and at most 3 subslices. */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
s_max = 1;
ss_max = 3;
}
@@ -5198,7 +4451,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
for (ss = 0; ss < ss_max; ss++) {
unsigned int eu_cnt;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
@@ -5386,6 +4639,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},
+ {"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
@@ -5449,19 +4703,6 @@ static const struct i915_debugfs_files {
{"i915_guc_log_control", &i915_guc_log_control_fops}
};
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
- pipe_crc->opened = false;
- spin_lock_init(&pipe_crc->lock);
- init_waitqueue_head(&pipe_crc->wq);
- }
-}
-
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
@@ -5471,11 +4712,9 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
- ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
- if (ret)
- return ret;
- }
+ ret = intel_pipe_crc_create(minor);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
ret = i915_debugfs_create(minor->debugfs_root, minor,
@@ -5501,12 +4740,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
1, minor);
- for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
- struct drm_info_list *info_list =
- (struct drm_info_list *)&i915_pipe_crc_data[i];
-
- drm_debugfs_remove_files(info_list, 1, minor);
- }
+ intel_pipe_crc_cleanup(minor);
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
struct drm_info_list *info_list =
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 188eb7f1192d..e703556eba99 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
+#include "intel_uc.h"
static struct drm_driver driver;
@@ -142,9 +143,8 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
return ret;
}
-static void intel_detect_pch(struct drm_device *dev)
+static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
@@ -317,6 +317,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_MIN_EU_IN_POOL:
value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
+ case I915_PARAM_HUC_STATUS:
+ /* The register is already force-woken. We don't need
+ * any RPM here.
+ */
+ value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+ break;
case I915_PARAM_MMAP_GTT_VERSION:
/* Though we've started our numbering from 1, and so class all
* earlier versions as 0, in effect their value is undefined as
@@ -362,10 +368,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
return 0;
}
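
Since I915_PARAM_HUC_STATUS is a new userspace-visible parameter, a hedged libdrm-based query could look like the following sketch (query_huc_status() is hypothetical; the ioctl and struct are the standard getparam interface):

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns nonzero once the HuC firmware has been verified, 0 if not,
 * -1 on ioctl failure. */
static int query_huc_status(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HUC_STATUS,
		.value = &value,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;
	return value;
}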
-static int i915_get_bridge_dev(struct drm_device *dev)
+static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
@@ -376,9 +380,8 @@ static int i915_get_bridge_dev(struct drm_device *dev)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
+intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
@@ -422,9 +425,8 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
-intel_setup_mchbar(struct drm_device *dev)
+intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -446,7 +448,7 @@ intel_setup_mchbar(struct drm_device *dev)
if (enabled)
return;
- if (intel_alloc_mchbar_resource(dev))
+ if (intel_alloc_mchbar_resource(dev_priv))
return;
dev_priv->mchbar_need_disable = true;
@@ -462,9 +464,8 @@ intel_setup_mchbar(struct drm_device *dev)
}
static void
-intel_teardown_mchbar(struct drm_device *dev)
+intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
@@ -494,9 +495,9 @@ intel_teardown_mchbar(struct drm_device *dev)
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
- struct drm_device *dev = cookie;
+ struct drm_i915_private *dev_priv = cookie;
- intel_modeset_vga_set_state(to_i915(dev), state);
+ intel_modeset_vga_set_state(dev_priv, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -504,6 +505,9 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+static int i915_resume_switcheroo(struct drm_device *dev);
+static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -545,12 +549,11 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_cleanup_engines(&dev_priv->drm);
- i915_gem_context_fini(&dev_priv->drm);
+ i915_gem_cleanup_engines(dev_priv);
+ i915_gem_context_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
- rcu_barrier();
- flush_work(&dev_priv->mm.free_work);
+ i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!list_empty(&dev_priv->context_list));
}
@@ -575,7 +578,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
- ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
+ ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
if (ret && ret != -ENODEV)
goto out;
@@ -596,7 +599,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_csr;
- intel_setup_gmbus(dev);
+ intel_setup_gmbus(dev_priv);
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
@@ -604,9 +607,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_irq;
- intel_guc_init(dev);
+ intel_huc_init(dev_priv);
+ intel_guc_init(dev_priv);
- ret = i915_gem_init(dev);
+ ret = i915_gem_init(dev_priv);
if (ret)
goto cleanup_irq;
@@ -627,13 +631,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- if (i915_gem_suspend(dev))
+ if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
cleanup_irq:
- intel_guc_fini(dev);
+ intel_guc_fini(dev_priv);
+ intel_huc_fini(dev_priv);
drm_irq_uninstall(dev);
- intel_teardown_gmbus(dev);
+ intel_teardown_gmbus(dev_priv);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -644,7 +649,6 @@ out:
return ret;
}
-#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
@@ -669,12 +673,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
return ret;
}
-#else
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
- return 0;
-}
-#endif
#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
@@ -812,26 +810,25 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
+ spin_lock_init(&dev_priv->wm.dsparb_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
+ intel_uc_init_early(dev_priv);
+
i915_memcpy_init_early(dev_priv);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
return ret;
- ret = intel_gvt_init(dev_priv);
- if (ret < 0)
- goto err_workqueues;
-
/* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(&dev_priv->drm);
+ intel_detect_pch(dev_priv);
- intel_pm_setup(&dev_priv->drm);
+ intel_pm_setup(dev_priv);
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
intel_irq_init(dev_priv);
@@ -839,9 +836,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- ret = i915_gem_load_init(&dev_priv->drm);
+ ret = i915_gem_load_init(dev_priv);
if (ret < 0)
- goto err_gvt;
+ goto err_workqueues;
intel_display_crc_init(dev_priv);
@@ -849,10 +846,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_detect_preproduction_hw(dev_priv);
+ i915_perf_init(dev_priv);
+
return 0;
-err_gvt:
- intel_gvt_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -864,13 +861,13 @@ err_workqueues:
*/
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
- i915_gem_load_cleanup(&dev_priv->drm);
+ i915_perf_fini(dev_priv);
+ i915_gem_load_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
}
-static int i915_mmio_setup(struct drm_device *dev)
+static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
int mmio_bar;
int mmio_size;
@@ -896,17 +893,16 @@ static int i915_mmio_setup(struct drm_device *dev)
}
/* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev);
+ intel_setup_mchbar(dev_priv);
return 0;
}
-static void i915_mmio_cleanup(struct drm_device *dev)
+static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
- intel_teardown_mchbar(dev);
+ intel_teardown_mchbar(dev_priv);
pci_iounmap(pdev, dev_priv->regs);
}
@@ -921,16 +917,15 @@ static void i915_mmio_cleanup(struct drm_device *dev)
*/
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
int ret;
if (i915_inject_load_failure())
return -ENODEV;
- if (i915_get_bridge_dev(dev))
+ if (i915_get_bridge_dev(dev_priv))
return -EIO;
- ret = i915_mmio_setup(dev);
+ ret = i915_mmio_setup(dev_priv);
if (ret < 0)
goto put_bridge;
@@ -950,10 +945,8 @@ put_bridge:
*/
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
-
intel_uncore_fini(dev_priv);
- i915_mmio_cleanup(dev);
+ i915_mmio_cleanup(dev_priv);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -1044,7 +1037,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
- if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
+ if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1079,6 +1072,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("can't enable MSI");
}
+ ret = intel_gvt_init(dev_priv);
+ if (ret)
+ goto out_ggtt;
+
return 0;
out_ggtt:
@@ -1125,8 +1122,11 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
- i915_guc_register(dev_priv);
+ i915_guc_log_register(dev_priv);
i915_setup_sysfs(dev_priv);
+
+ /* Depends on sysfs having been initialized */
+ i915_perf_register(dev_priv);
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1163,8 +1163,10 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
+ i915_perf_unregister(dev_priv);
+
i915_teardown_sysfs(dev_priv);
- i915_guc_unregister(dev_priv);
+ i915_guc_log_unregister(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@@ -1195,8 +1197,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev_priv)
ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
if (ret) {
- dev_printk(KERN_ERR, &pdev->dev,
- "[" DRM_NAME ":%s] allocation failed\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
kfree(dev_priv);
return ret;
}
@@ -1244,6 +1245,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv);
+ dev_priv->ipc_enabled = false;
+
/* Everything is in place, we can now relax! */
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
@@ -1281,11 +1284,13 @@ void i915_driver_unload(struct drm_device *dev)
intel_fbdev_fini(dev);
- if (i915_gem_suspend(dev))
+ if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ intel_gvt_cleanup(dev_priv);
+
i915_driver_unregister(dev_priv);
drm_vblank_cleanup(dev);
@@ -1313,12 +1318,13 @@ void i915_driver_unload(struct drm_device *dev)
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- i915_destroy_error_state(dev);
+ i915_destroy_error_state(dev_priv);
/* Flush any outstanding unpin_work. */
drain_workqueue(dev_priv->wq);
- intel_guc_fini(dev);
+ intel_guc_fini(dev_priv);
+ intel_huc_fini(dev_priv);
i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
@@ -1423,14 +1429,14 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(pdev);
- error = i915_gem_suspend(dev);
+ error = i915_gem_suspend(dev_priv);
if (error) {
dev_err(&pdev->dev,
"GEM idle failed, resume might fail\n");
goto out;
}
- intel_guc_suspend(dev);
+ intel_guc_suspend(dev_priv);
intel_display_suspend(dev);
@@ -1445,7 +1451,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_gem_suspend_gtt_mappings(dev_priv);
- i915_save_state(dev);
+ i915_save_state(dev_priv);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
@@ -1476,7 +1482,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_display_set_init_power(dev_priv, false);
- fw_csr = !IS_BROXTON(dev_priv) &&
+ fw_csr = !IS_GEN9_LP(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
@@ -1489,7 +1495,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_power_domains_suspend(dev_priv);
ret = 0;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
@@ -1528,7 +1534,7 @@ out:
return ret;
}
-int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
+static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
int error;
@@ -1566,33 +1572,36 @@ static int i915_drm_resume(struct drm_device *dev)
intel_csr_ucode_resume(dev_priv);
- i915_gem_resume(dev);
+ i915_gem_resume(dev_priv);
- i915_restore_state(dev);
+ i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
intel_opregion_setup(dev_priv);
- intel_init_pch_refclk(dev);
- drm_mode_config_reset(dev);
+ intel_init_pch_refclk(dev_priv);
/*
* Interrupts have to be enabled before any batches are run. If not the
* GPU will hang. i915_gem_init_hw() will initiate batches to
* update/restore the context.
*
+ * drm_mode_config_reset() needs AUX interrupts.
+ *
* Modeset enabling in intel_modeset_init_hw() also needs working
* interrupts.
*/
intel_runtime_pm_enable_interrupts(dev_priv);
+ drm_mode_config_reset(dev);
+
mutex_lock(&dev->struct_mutex);
- if (i915_gem_init_hw(dev)) {
+ if (i915_gem_init_hw(dev_priv)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
i915_gem_set_wedged(dev_priv);
}
mutex_unlock(&dev->struct_mutex);
- intel_guc_resume(dev);
+ intel_guc_resume(dev_priv);
intel_modeset_init_hw(dev);
@@ -1694,7 +1703,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_early_sanitize(dev_priv, true);
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
@@ -1704,7 +1713,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_sanitize(dev_priv);
- if (IS_BROXTON(dev_priv) ||
+ if (IS_GEN9_LP(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
@@ -1716,7 +1725,7 @@ out:
return ret;
}
-int i915_resume_switcheroo(struct drm_device *dev)
+static int i915_resume_switcheroo(struct drm_device *dev)
{
int ret;
@@ -1730,25 +1739,9 @@ int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
-static void disable_engines_irq(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* Ensure irq handler finishes, and not run again. */
- disable_irq(dev_priv->drm.irq);
- for_each_engine(engine, dev_priv, id)
- tasklet_kill(&engine->irq_tasklet);
-}
-
-static void enable_engines_irq(struct drm_i915_private *dev_priv)
-{
- enable_irq(dev_priv->drm.irq);
-}
-
/**
* i915_reset - reset chip after a hang
- * @dev: drm device to reset
+ * @dev_priv: device private to reset
*
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
@@ -1765,11 +1758,10 @@ static void enable_engines_irq(struct drm_i915_private *dev_priv)
*/
void i915_reset(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct i915_gpu_error *error = &dev_priv->gpu_error;
int ret;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
return;
@@ -1779,11 +1771,15 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
+ disable_irq(dev_priv->drm.irq);
+ ret = i915_gem_reset_prepare(dev_priv);
+ if (ret) {
+ DRM_ERROR("GPU recovery failed\n");
+ intel_gpu_reset(dev_priv, ALL_ENGINES);
+ goto error;
+ }
- disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
- enable_engines_irq(dev_priv);
-
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1792,7 +1788,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
- i915_gem_reset(dev_priv);
+ i915_gem_reset_finish(dev_priv);
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
@@ -1809,13 +1805,16 @@ void i915_reset(struct drm_i915_private *dev_priv)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(dev);
+ ret = i915_gem_init_hw(dev_priv);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
goto error;
}
+ i915_queue_hangcheck(dev_priv);
+
wakeup:
+ enable_irq(dev_priv->drm.irq);
wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
return;
@@ -2321,12 +2320,12 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_guc_suspend(dev);
+ intel_guc_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2406,12 +2405,12 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- intel_guc_resume(dev);
+ intel_guc_resume(dev_priv);
if (IS_GEN6(dev_priv))
- intel_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev_priv);
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
@@ -2550,8 +2549,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
@@ -2567,6 +2566,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 029d5c3c81ef..bcc81912b5e5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,17 +49,20 @@
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
+#include <drm/drm_cache.h>
#include "i915_params.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_bios.h"
#include "intel_dpll_mgr.h"
-#include "intel_guc.h"
+#include "intel_uc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"
#include "i915_gem.h"
+#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
@@ -76,8 +79,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20161121"
-#define DRIVER_TIMESTAMP 1479717903
+#define DRIVER_DATE "20170123"
+#define DRIVER_TIMESTAMP 1485156432
#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
@@ -119,6 +122,90 @@ bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
__i915_inject_load_failure(__func__, __LINE__)
+typedef struct {
+ uint32_t val;
+} uint_fixed_16_16_t;
+
+#define FP_16_16_MAX ({ \
+ uint_fixed_16_16_t fp; \
+ fp.val = UINT_MAX; \
+ fp; \
+})
+
+static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
+{
+ uint_fixed_16_16_t fp;
+
+ WARN_ON(val >> 16);
+
+ fp.val = val << 16;
+ return fp;
+}
+
+static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
+{
+ return DIV_ROUND_UP(fp.val, 1 << 16);
+}
+
+static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp)
+{
+ return fp.val >> 16;
+}
+
+static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
+ uint_fixed_16_16_t min2)
+{
+ uint_fixed_16_16_t min;
+
+ min.val = min(min1.val, min2.val);
+ return min;
+}
+
+static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
+ uint_fixed_16_16_t max2)
+{
+ uint_fixed_16_16_t max;
+
+ max.val = max(max1.val, max2.val);
+ return max;
+}
+
+static inline uint_fixed_16_16_t fixed_16_16_div_round_up(uint32_t val,
+ uint32_t d)
+{
+ uint_fixed_16_16_t fp, res;
+
+ fp = u32_to_fixed_16_16(val);
+ res.val = DIV_ROUND_UP(fp.val, d);
+ return res;
+}
+
+static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val,
+ uint32_t d)
+{
+ uint_fixed_16_16_t res;
+ uint64_t interm_val;
+
+ interm_val = (uint64_t)val << 16;
+ interm_val = DIV_ROUND_UP_ULL(interm_val, d);
+ WARN_ON(interm_val >> 32);
+ res.val = (uint32_t) interm_val;
+
+ return res;
+}
+
+static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
+ uint_fixed_16_16_t mul)
+{
+ uint64_t intermediate_val;
+ uint_fixed_16_16_t fp;
+
+ intermediate_val = (uint64_t) val * mul.val;
+ WARN_ON(intermediate_val >> 32);
+ fp.val = (uint32_t) intermediate_val;
+ return fp;
+}
+
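
A quick worked example of the new 16.16 fixed-point helpers (illustration only, not part of the patch):

/* 1.5 in 16.16 fixed point: DIV_ROUND_UP(3 << 16, 2) == 0x00018000 */
uint_fixed_16_16_t ratio = fixed_16_16_div_round_up(3, 2);

/* 100 * 1.5 == 150.0, still in 16.16 form: 150 << 16 */
uint_fixed_16_16_t scaled = mul_u32_fixed_16_16(100, ratio);

/* back to an integer, rounding up: 150 */
uint32_t result = fixed_16_16_to_u32_round_up(scaled);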
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
@@ -180,21 +267,39 @@ static inline bool transcoder_is_dsi(enum transcoder transcoder)
}
/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x+.
+ */
+enum plane {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
* I915_MAX_PLANES in the enum below is the maximum (across all platforms)
* number of planes per CRTC. Not all platforms really have this many planes,
* which means some arrays of size I915_MAX_PLANES may have unused entries
* between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (e.g. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
*/
-enum plane {
- PLANE_A = 0,
- PLANE_B,
- PLANE_C,
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
PLANE_CURSOR,
I915_MAX_PLANES,
};
-#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+ for_each_if ((__crtc)->plane_ids_mask & BIT(__p))
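
A hypothetical iteration sketch using the new per-pipe plane identifiers (assumes a crtc whose plane_ids_mask has been populated, as the macro above expects):

static void example_walk_planes(struct intel_crtc *crtc)
{
	enum plane_id plane_id;

	/* visits only the plane ids set in crtc->plane_ids_mask */
	for_each_plane_id_on_crtc(crtc, plane_id)
		DRM_DEBUG_KMS("crtc has plane id %d\n", plane_id);
}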
enum port {
PORT_NONE = -1,
@@ -216,7 +321,8 @@ enum dpio_channel {
enum dpio_phy {
DPIO_PHY0,
- DPIO_PHY1
+ DPIO_PHY1,
+ DPIO_PHY2,
};
enum intel_display_power_domain {
@@ -416,6 +522,15 @@ struct drm_i915_file_private {
} rps;
unsigned int bsd_engine;
+
+/* A client can have a maximum of 3 contexts banned before
+ * it is denied the creation of new contexts. As one context
+ * ban needs 4 consecutive hangs, and more if there is
+ * progress in between, this is a last-resort stopgap measure
+ * to limit a badly behaving client's access to the GPU.
+ */
+#define I915_MAX_CLIENT_CONTEXT_BANS 3
+ int context_bans;
};
/* Used by dp and fdi links */
@@ -659,32 +774,20 @@ struct intel_csr {
};
#define DEV_INFO_FOR_EACH_FLAG(func) \
- /* Keep is_* in chronological order */ \
func(is_mobile); \
- func(is_i85x); \
- func(is_i915g); \
- func(is_i945gm); \
- func(is_g33); \
- func(is_g4x); \
- func(is_pineview); \
- func(is_broadwater); \
- func(is_crestline); \
- func(is_ivybridge); \
- func(is_valleyview); \
- func(is_cherryview); \
- func(is_haswell); \
- func(is_broadwell); \
- func(is_skylake); \
- func(is_broxton); \
- func(is_kabylake); \
+ func(is_lp); \
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
+ func(has_aliasing_ppgtt); \
func(has_csr); \
func(has_ddi); \
+ func(has_decoupled_mmio); \
func(has_dp_mst); \
func(has_fbc); \
func(has_fpga_dbg); \
+ func(has_full_ppgtt); \
+ func(has_full_48bit_ppgtt); \
func(has_gmbus_irq); \
func(has_gmch_display); \
func(has_guc); \
@@ -705,8 +808,7 @@ struct intel_csr {
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_decoupled_mmio)
+ func(supports_tv);
struct sseu_dev_info {
u8 slice_mask;
@@ -726,13 +828,45 @@ static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}
+/* Keep in gen based order, and chronological order within a gen */
+enum intel_platform {
+ INTEL_PLATFORM_UNINITIALIZED = 0,
+ INTEL_I830,
+ INTEL_I845G,
+ INTEL_I85X,
+ INTEL_I865G,
+ INTEL_I915G,
+ INTEL_I915GM,
+ INTEL_I945G,
+ INTEL_I945GM,
+ INTEL_G33,
+ INTEL_PINEVIEW,
+ INTEL_I965G,
+ INTEL_I965GM,
+ INTEL_G45,
+ INTEL_GM45,
+ INTEL_IRONLAKE,
+ INTEL_SANDYBRIDGE,
+ INTEL_IVYBRIDGE,
+ INTEL_VALLEYVIEW,
+ INTEL_HASWELL,
+ INTEL_BROADWELL,
+ INTEL_CHERRYVIEW,
+ INTEL_SKYLAKE,
+ INTEL_BROXTON,
+ INTEL_KABYLAKE,
+ INTEL_GEMINILAKE,
+};
+
struct intel_device_info {
u32 display_mmio_offset;
u16 device_id;
u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
u8 gen;
u16 gen_mask;
+ enum intel_platform platform;
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
#define DEFINE_FLAG(name) u8 name:1
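
The flag list above is consumed as an X-macro; as an illustration (not part of the patch), the DEFINE_FLAG expansion turns each entry into a one-bit field:

/* DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG) expands, entry by entry, to:
 *
 *	u8 is_mobile:1;
 *	u8 is_lp:1;
 *	...
 *	u8 supports_tv:1;
 *
 * so adding a flag to the list grows struct intel_device_info and every
 * other DEV_INFO_FOR_EACH_FLAG() consumer (e.g. debug printers) in step. */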
@@ -800,7 +934,8 @@ struct drm_i915_error_state {
/* Software tracked state */
bool waiting;
int num_waiters;
- int hangcheck_score;
+ unsigned long hangcheck_timestamp;
+ bool hangcheck_stalled;
enum intel_engine_hangcheck_action hangcheck_action;
struct i915_address_space *vm;
int num_requests;
@@ -849,6 +984,7 @@ struct drm_i915_error_state {
long jiffies;
pid_t pid;
u32 context;
+ int ban_score;
u32 seqno;
u32 head;
u32 tail;
@@ -870,6 +1006,7 @@ struct drm_i915_error_state {
pid_t pid;
char comm[TASK_COMM_LEN];
+ int context_bans;
} engine[I915_NUM_ENGINES];
struct drm_i915_error_buffer {
@@ -901,86 +1038,7 @@ enum i915_cache_level {
I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
-struct i915_ctx_hang_stats {
- /* This context had batch pending when hang was declared */
- unsigned batch_pending;
-
- /* This context had batch active when hang was declared */
- unsigned batch_active;
-
- /* Time when this context was last blamed for a GPU reset */
- unsigned long guilty_ts;
-
- /* If the contexts causes a second GPU hang within this time,
- * it is permanently banned from submitting any more work.
- */
- unsigned long ban_period_seconds;
-
- /* This context is banned to submit more work */
- bool banned;
-};
-
-/* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_HANDLE 0
-
-/**
- * struct i915_gem_context - as the name implies, represents a context.
- * @ref: reference count.
- * @user_handle: userspace tracking identity for this context.
- * @remap_slice: l3 row remapping information.
- * @flags: context specific flags:
- * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
- * @file_priv: filp associated with this context (NULL for global default
- * context).
- * @hang_stats: information about the role of this context in possible GPU
- * hangs.
- * @ppgtt: virtual memory space used by this context.
- * @legacy_hw_ctx: render context backing object and whether it is correctly
- * initialized (legacy ring submission mechanism only).
- * @link: link in the global list of contexts.
- *
- * Contexts are memory images used by the hardware to store copies of their
- * internal state.
- */
-struct i915_gem_context {
- struct kref ref;
- struct drm_i915_private *i915;
- struct drm_i915_file_private *file_priv;
- struct i915_hw_ppgtt *ppgtt;
- struct pid *pid;
- const char *name;
-
- struct i915_ctx_hang_stats hang_stats;
-
- unsigned long flags;
-#define CONTEXT_NO_ZEROMAP BIT(0)
-#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
-
- /* Unique identifier for this context, used by the hw for tracking */
- unsigned int hw_id;
- u32 user_handle;
- int priority; /* greater priorities are serviced first */
-
- u32 ggtt_alignment;
-
- struct intel_context {
- struct i915_vma *state;
- struct intel_ring *ring;
- uint32_t *lrc_reg_state;
- u64 lrc_desc;
- int pin_count;
- bool initialised;
- } engine[I915_NUM_ENGINES];
- u32 ring_size;
- u32 desc_template;
- struct atomic_notifier_head status_notifier;
- bool execlists_force_single_submission;
-
- struct list_head link;
-
- u8 remap_slice;
- bool closed:1;
-};
+#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
enum fb_op_origin {
ORIGIN_GTT,
@@ -1027,7 +1085,7 @@ struct intel_fbc {
} plane;
struct {
- uint32_t pixel_format;
+ const struct drm_format_info *format;
unsigned int stride;
} fb;
} state_cache;
@@ -1042,7 +1100,7 @@ struct intel_fbc {
} crtc;
struct {
- uint32_t pixel_format;
+ const struct drm_format_info *format;
unsigned int stride;
} fb;
@@ -1058,7 +1116,7 @@ struct intel_fbc {
const char *no_fbc_reason;
};
-/**
+/*
* HIGH_RR is the highest eDP panel refresh rate read from EDID
* LOW_RR is the lowest eDP panel refresh rate found from EDID
* parsing for same resolution.
@@ -1096,6 +1154,9 @@ struct i915_psr {
bool psr2_support;
bool aux_frame_sync;
bool link_standby;
+ bool y_cord_support;
+ bool colorimetry_support;
+ bool alpm;
};
enum intel_pch {
@@ -1395,7 +1456,7 @@ struct i915_gem_mm {
struct work_struct free_work;
/** Usable portion of the GTT for GEM */
- unsigned long stolen_base; /* limited to low memory (32-bit) */
+ phys_addr_t stolen_base; /* limited to low memory (32-bit) */
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
@@ -1438,19 +1499,20 @@ struct drm_i915_error_state_buf {
};
struct i915_error_state_file_priv {
- struct drm_device *dev;
+ struct drm_i915_private *i915;
struct drm_i915_error_state *error;
};
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
+#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
- /* Hang gpu twice in this window and your context gets banned */
-#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
struct delayed_work hangcheck_work;
@@ -1532,6 +1594,7 @@ struct ddi_vbt_port_info {
uint8_t supports_dvi:1;
uint8_t supports_hdmi:1;
uint8_t supports_dp:1;
+ uint8_t supports_edp:1;
uint8_t alternate_aux_channel;
uint8_t alternate_ddc_pin;
@@ -1591,6 +1654,7 @@ struct intel_vbt_data {
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
+ u8 controller; /* brightness controller number */
enum intel_backlight_type type;
} backlight;
@@ -1637,24 +1701,22 @@ struct ilk_wm_values {
};
struct vlv_pipe_wm {
- uint16_t primary;
- uint16_t sprite[2];
- uint8_t cursor;
+ uint16_t plane[I915_MAX_PLANES];
};
struct vlv_sr_wm {
uint16_t plane;
- uint8_t cursor;
+ uint16_t cursor;
+};
+
+struct vlv_wm_ddl_values {
+ uint8_t plane[I915_MAX_PLANES];
};
struct vlv_wm_values {
struct vlv_pipe_wm pipe[3];
struct vlv_sr_wm sr;
- struct {
- uint8_t cursor;
- uint8_t sprite[2];
- uint8_t primary;
- } ddl[3];
+ struct vlv_wm_ddl_values ddl[3];
uint8_t level;
bool cxsr;
};
@@ -1750,6 +1812,7 @@ struct intel_pipe_crc {
enum intel_pipe_crc_source source;
int head, tail;
wait_queue_head_t wq;
+ int skipped;
};
struct i915_frontbuffer_tracking {
@@ -1795,6 +1858,201 @@ struct intel_wm_config {
bool sprites_scaled;
};
+struct i915_oa_format {
+ u32 format;
+ int size;
+};
+
+struct i915_oa_reg {
+ i915_reg_t addr;
+ u32 value;
+};
+
+struct i915_perf_stream;
+
+/**
+ * struct i915_perf_stream_ops - the OPs to support a specific stream type
+ */
+struct i915_perf_stream_ops {
+ /**
+ * @enable: Enables the collection of HW samples, either in response to
+ * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
+ * without `I915_PERF_FLAG_DISABLED`.
+ */
+ void (*enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable: Disables the collection of HW samples, either in response
+ * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
+ * the stream.
+ */
+ void (*disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @poll_wait: Call poll_wait, passing a wait queue that will be woken
+ * once there is something ready to read() for the stream
+ */
+ void (*poll_wait)(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait);
+
+ /**
+ * @wait_unlocked: For handling a blocking read, wait until there is
+ * something ready to read() for the stream. E.g. wait on the same
+ * wait queue that would be passed to poll_wait().
+ */
+ int (*wait_unlocked)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy buffered metrics as records to userspace
+ * **buf**: the userspace destination buffer
+ * **count**: the number of bytes to copy, requested by userspace
+ * **offset**: zero at the start of the read, updated as the read
+ * proceeds; it represents how many bytes have been copied so far and
+ * the buffer offset for copying the next record.
+ *
+ * Copy as many buffered i915 perf samples and records for this stream
+ * to userspace as will fit in the given buffer.
+ *
+ * Only write complete records, returning -%ENOSPC if there isn't room
+ * for a complete record.
+ *
+ * Return any error condition that results in a short read such as
+ * -%ENOSPC or -%EFAULT, even though these may be squashed before
+ * returning to userspace.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @destroy: Cleanup any stream specific resources.
+ *
+ * The stream will always be disabled before this is called.
+ */
+ void (*destroy)(struct i915_perf_stream *stream);
+};
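
A hedged sketch of a read() implementation honoring the contract documented above: whole records only, *offset advanced per record, and -ENOSPC once the next record no longer fits (example_next_record() and the record layout are hypothetical):

struct example_record {
	u32 header;
	u32 payload[7];
};

static int example_stream_read(struct i915_perf_stream *stream,
			       char __user *buf, size_t count,
			       size_t *offset)
{
	struct example_record rec;

	while (example_next_record(stream, &rec)) {	/* hypothetical source */
		/* only ever copy whole records */
		if (*offset + sizeof(rec) > count)
			return -ENOSPC;

		if (copy_to_user(buf + *offset, &rec, sizeof(rec)))
			return -EFAULT;

		*offset += sizeof(rec);
	}

	return 0;
}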
+
+/**
+ * struct i915_perf_stream - state for a single open stream FD
+ */
+struct i915_perf_stream {
+ /**
+ * @dev_priv: i915 drm device
+ */
+ struct drm_i915_private *dev_priv;
+
+ /**
+ * @link: Links the stream into ``&drm_i915_private->streams``
+ */
+ struct list_head link;
+
+ /**
+ * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
+ * properties given when opening a stream, representing the contents
+ * of a single sample as read() by userspace.
+ */
+ u32 sample_flags;
+
+ /**
+ * @sample_size: Considering the configured contents of a sample
+ * combined with the required header size, this is the total size
+ * of a single sample record.
+ */
+ int sample_size;
+
+ /**
+ * @ctx: %NULL if measuring system-wide across all contexts or a
+ * specific context that is being monitored.
+ */
+ struct i915_gem_context *ctx;
+
+ /**
+ * @enabled: Whether the stream is currently enabled, considering
+ * whether the stream was opened in a disabled state and based
+ * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
+ */
+ bool enabled;
+
+ /**
+ * @ops: The callbacks providing the implementation of this specific
+ * type of configured stream.
+ */
+ const struct i915_perf_stream_ops *ops;
+};
+
+/**
+ * struct i915_oa_ops - Gen specific implementation of an OA unit stream
+ */
+struct i915_oa_ops {
+ /**
+ * @init_oa_buffer: Resets the head and tail pointers of the
+ * circular buffer for periodic OA reports.
+ *
+ * Called when first opening a stream for OA metrics, but may also be
+ * called in response to an OA buffer overflow or other error
+ * condition.
+ *
+ * Note it may be necessary to clear the full OA buffer here as part of
+ * maintaining the invariant that new reports must be written to
+ * zeroed memory for us to be able to reliably detect whether an
+ * expected report has not yet landed in memory. (At least on Haswell
+ * the OA buffer tail pointer is not synchronized with reports being
+ * visible to the CPU.)
+ */
+ void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @enable_metric_set: Applies any MUX configuration to set up the
+ * Boolean and Custom (B/C) counters that are part of the counter
+ * reports being sampled. May apply system constraints such as
+ * disabling EU clock gating as required.
+ */
+ int (*enable_metric_set)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @disable_metric_set: Remove system constraints associated with using
+ * the OA unit.
+ */
+ void (*disable_metric_set)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @oa_enable: Enable periodic sampling
+ */
+ void (*oa_enable)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @oa_disable: Disable periodic sampling
+ */
+ void (*oa_disable)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @read: Copy data from the circular OA buffer into a given userspace
+ * buffer.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @oa_buffer_is_empty: Check if the OA buffer is empty (false positives OK)
+ *
+ * This is either called via fops or the poll check hrtimer (atomic
+ * ctx) without any locks taken.
+ *
+ * It's safe to read the OA config state here without locking, assuming
+ * this is only called while the stream is enabled, during which the
+ * global OA configuration can't be modified.
+ *
+ * Efficiency is more important than avoiding some false positives
+ * here, which will be handled gracefully - likely resulting in an
+ * %EAGAIN error for userspace.
+ */
+ bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv);
+};
+
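
As a hedged sketch of the atomic-context caller mentioned in @oa_buffer_is_empty above: a poll-check hrtimer could wake readers roughly as follows. POLL_PERIOD and oa_poll_check_timer_cb() are assumed names, not defined in this header; the perf.oa fields referenced are introduced in drm_i915_private below.

#define POLL_PERIOD (NSEC_PER_SEC / 100) /* assumed 10ms cadence */

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
	struct drm_i915_private *dev_priv =
		container_of(hrtimer, struct drm_i915_private,
			     perf.oa.poll_check_timer);

	/* No locks taken: oa_buffer_is_empty() tolerates false positives */
	if (!dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)) {
		dev_priv->perf.oa.pollin = true;
		wake_up(&dev_priv->perf.oa.poll_wq);
	}

	hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
	return HRTIMER_RESTART;
}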
struct drm_i915_private {
struct drm_device drm;
@@ -1815,6 +2073,7 @@ struct drm_i915_private {
struct intel_gvt *gvt;
+ struct intel_huc huc;
struct intel_guc guc;
struct intel_csr csr;
@@ -1898,7 +2157,14 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
- unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
+ unsigned int cdclk_freq, max_cdclk_freq;
+
+ /*
+ * For reading, holding any crtc lock is sufficient;
+ * for writing, all of them must be held.
+ */
+ unsigned int atomic_cdclk_freq;
+
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int hpll_freq;
@@ -2051,6 +2317,9 @@ struct drm_i915_private {
} sagv_status;
struct {
+ /* protects DSPARB registers on pre-g4x/vlv/chv */
+ spinlock_t dsparb_lock;
+
/*
* Raw watermark latency values:
* in 0.1us units for WM0,
@@ -2095,6 +2364,54 @@ struct drm_i915_private {
struct i915_runtime_pm pm;
+ struct {
+ bool initialized;
+
+ struct kobject *metrics_kobj;
+ struct ctl_table_header *sysctl_header;
+
+ struct mutex lock;
+ struct list_head streams;
+
+ spinlock_t hook_lock;
+
+ struct {
+ struct i915_perf_stream *exclusive_stream;
+
+ u32 specific_ctx_id;
+
+ struct hrtimer poll_check_timer;
+ wait_queue_head_t poll_wq;
+ bool pollin;
+
+ bool periodic;
+ int period_exponent;
+ int timestamp_frequency;
+
+ int tail_margin;
+
+ int metrics_set;
+
+ const struct i915_oa_reg *mux_regs;
+ int mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ int b_counter_regs_len;
+
+ struct {
+ struct i915_vma *vma;
+ u8 *vaddr;
+ int format;
+ int format_size;
+ } oa_buffer;
+
+ u32 gen7_latched_oastatus1;
+
+ struct i915_oa_ops ops;
+ const struct i915_oa_format *oa_formats;
+ int n_builtin_sets;
+ } oa;
+ } perf;
+
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
void (*resume)(struct drm_i915_private *);
@@ -2137,6 +2454,8 @@ struct drm_i915_private {
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
+ bool ipc_enabled;
+
/* Used to save the pipe-to-encoder mapping for audio */
struct intel_encoder *av_enc_map[I915_MAX_PIPES];
@@ -2291,102 +2610,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
-/*
- * A command that requires special handling by the command parser.
- */
-struct drm_i915_cmd_descriptor {
- /*
- * Flags describing how the command parser processes the command.
- *
- * CMD_DESC_FIXED: The command has a fixed length if this is set,
- * a length mask if not set
- * CMD_DESC_SKIP: The command is allowed but does not follow the
- * standard length encoding for the opcode range in
- * which it falls
- * CMD_DESC_REJECT: The command is never allowed
- * CMD_DESC_REGISTER: The command should be checked against the
- * register whitelist for the appropriate ring
- * CMD_DESC_MASTER: The command is allowed if the submitting process
- * is the DRM master
- */
- u32 flags;
-#define CMD_DESC_FIXED (1<<0)
-#define CMD_DESC_SKIP (1<<1)
-#define CMD_DESC_REJECT (1<<2)
-#define CMD_DESC_REGISTER (1<<3)
-#define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)
-
- /*
- * The command's unique identification bits and the bitmask to get them.
- * This isn't strictly the opcode field as defined in the spec and may
- * also include type, subtype, and/or subop fields.
- */
- struct {
- u32 value;
- u32 mask;
- } cmd;
-
- /*
- * The command's length. The command is either fixed length (i.e. does
- * not include a length field) or has a length field mask. The flag
- * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
- * a length mask. All command entries in a command table must include
- * length information.
- */
- union {
- u32 fixed;
- u32 mask;
- } length;
-
- /*
- * Describes where to find a register address in the command to check
- * against the ring's register whitelist. Only valid if flags has the
- * CMD_DESC_REGISTER bit set.
- *
- * A non-zero step value implies that the command may access multiple
- * registers in sequence (e.g. LRI), in that case step gives the
- * distance in dwords between individual offset fields.
- */
- struct {
- u32 offset;
- u32 mask;
- u32 step;
- } reg;
-
-#define MAX_CMD_DESC_BITMASKS 3
- /*
- * Describes command checks where a particular dword is masked and
- * compared against an expected value. If the command does not match
- * the expected value, the parser rejects it. Only valid if flags has
- * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
- * are valid.
- *
- * If the check specifies a non-zero condition_mask then the parser
- * only performs the check when the bits specified by condition_mask
- * are non-zero.
- */
- struct {
- u32 offset;
- u32 mask;
- u32 expected;
- u32 condition_offset;
- u32 condition_mask;
- } bits[MAX_CMD_DESC_BITMASKS];
-};
-
-/*
- * A table of commands requiring special handling by the command parser.
- *
- * Each engine has an array of tables. Each table consists of an array of
- * command descriptors, which must be sorted with command opcodes in
- * ascending order.
- */
-struct drm_i915_cmd_table {
- const struct drm_i915_cmd_descriptor *table;
- int count;
-};
-
static inline const struct intel_device_info *
intel_info(const struct drm_i915_private *dev_priv)
{
@@ -2428,34 +2651,36 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
-#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
-#define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x)
-#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
-#define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g)
-#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
-#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
-#define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm)
-#define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater)
-#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline)
-#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
-#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
+#define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830)
+#define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G)
+#define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X)
+#define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G)
+#define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G)
+#define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM)
+#define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G)
+#define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM)
+#define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G)
+#define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM)
+#define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45)
+#define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45)
+#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
-#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview)
-#define IS_G33(dev_priv) ((dev_priv)->info.is_g33)
+#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW)
+#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
-#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
+#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
INTEL_DEVID(dev_priv) == 0x0152 || \
INTEL_DEVID(dev_priv) == 0x015a)
-#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
-#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
-#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
-#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
-#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
-#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
-#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
+#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW)
+#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW)
+#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL)
+#define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL)
+#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE)
+#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON)
+#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE)
+#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -2512,6 +2737,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define BXT_REVID_A0 0x0
#define BXT_REVID_A1 0x1
#define BXT_REVID_B0 0x3
+#define BXT_REVID_B_LAST 0x8
#define BXT_REVID_C0 0x9
#define IS_BXT_REVID(dev_priv, since, until) \
@@ -2541,6 +2767,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
+#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)
+#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
+
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
#define BSD_RING ENGINE_MASK(VCS)
@@ -2577,7 +2806,7 @@ intel_info(const struct drm_i915_private *dev_priv)
((dev_priv)->info.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
+#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -2630,6 +2859,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
@@ -2686,9 +2916,6 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
return false;
}
-extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
-extern int i915_resume_switcheroo(struct drm_device *dev);
-
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
@@ -2891,10 +3118,10 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -2902,23 +3129,37 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_load_init(struct drm_device *dev);
-void i915_gem_load_cleanup(struct drm_device *dev);
+int i915_gem_load_init(struct drm_i915_private *dev_priv);
+void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void *i915_gem_object_alloc(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- u64 size);
-struct drm_i915_gem_object *i915_gem_object_create_from_data(
- struct drm_device *dev, const void *data, size_t size);
+struct drm_i915_gem_object *
+i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
+ const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
+static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+ /* A single pass should suffice to release all the freed objects (along
+ * most call paths), but be a little more paranoid: freeing the objects
+ * does take a small amount of time, during which the RCU callbacks
+ * could have added new objects to the freed list and armed the work
+ * again.
+ */
+ do {
+ rcu_barrier();
+ } while (flush_work(&i915->mm.free_work));
+}
+
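As a hedged call-site sketch (example_teardown() is hypothetical), the helper is intended for paths that must observe every freed object fully released before proceeding:

static void example_teardown(struct drm_i915_private *i915)
{
	/* Wait for RCU-deferred frees to run and the free worker to idle */
	i915_gem_drain_freed_objects(i915);

	/* The deferred-free list must now be empty */
	WARN_ON(!llist_empty(&i915->mm.free_list));
}
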
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -2988,7 +3229,6 @@ __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!obj->mm.pages);
atomic_dec(&obj->mm.pages_pin_count);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
static inline void
@@ -3013,8 +3253,8 @@ enum i915_map_type {
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
- * @obj - the object to map into kernel address space
- * @type - the type of mapping, used to select pgprot_t
+ * @obj: the object to map into kernel address space
+ * @type: the type of mapping, used to select pgprot_t
*
* Calls i915_gem_object_pin_pages() to prevent reaping of the object's
* pages and then returns a contiguous mapping of the backing storage into
@@ -3032,7 +3272,7 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
/**
* i915_gem_object_unpin_map - releases an earlier mapping
- * @obj - the object to unmap
+ * @obj: the object to unmap
*
* After pinning the object and mapping its pages, once you are finished
* with your access, call i915_gem_object_unpin_map() to release the pin
@@ -3100,17 +3340,18 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return READ_ONCE(error->reset_count);
}
-void i915_gem_reset(struct drm_i915_private *dev_priv);
+int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
-int __must_check i915_gem_init(struct drm_device *dev);
-int __must_check i915_gem_init_hw(struct drm_device *dev);
+int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_engines(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
-int __must_check i915_gem_suspend(struct drm_device *dev);
-void i915_gem_resume(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
@@ -3136,11 +3377,6 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode);
-u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode, bool fenced);
-
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@@ -3150,33 +3386,17 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-struct i915_vma *
-i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
return container_of(vm, struct i915_hw_ppgtt, base);
}
-static inline struct i915_vma *
-i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
-{
- return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
-}
-
/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
+void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
@@ -3185,23 +3405,6 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
-/* i915_gem_context.c */
-int __must_check i915_gem_context_init(struct drm_device *dev);
-void i915_gem_context_lost(struct drm_i915_private *dev_priv);
-void i915_gem_context_fini(struct drm_device *dev);
-int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct drm_i915_gem_request *req);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
-struct i915_vma *
-i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
- unsigned int flags);
-void i915_gem_context_free(struct kref *ctx_ref);
-struct drm_i915_gem_object *
-i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev);
-
static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
@@ -3229,6 +3432,14 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_free);
}
+static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx)
+{
+ struct mutex *lock = &ctx->i915->drm.struct_mutex;
+
+ if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock))
+ mutex_unlock(lock);
+}
+
static inline struct intel_timeline *
i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
@@ -3239,21 +3450,8 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
return &vm->timeline.engine[engine->id];
}
-static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
-{
- return c->user_handle == DEFAULT_CONTEXT_HANDLE;
-}
-
-int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
+int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@@ -3261,7 +3459,9 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
unsigned cache_level,
u64 start, u64 end,
unsigned flags);
-int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
+int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */
@@ -3285,9 +3485,9 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
u32 stolen_offset,
u32 gtt_offset,
u32 size);
@@ -3295,7 +3495,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
- unsigned int size);
+ phys_addr_t size);
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
@@ -3320,6 +3520,11 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
i915_gem_object_is_tiled(obj);
}
+u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
+ unsigned int tiling, unsigned int stride);
+u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
+ unsigned int tiling, unsigned int stride);
+
/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
@@ -3355,7 +3560,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
-void i915_destroy_error_state(struct drm_device *dev);
+void i915_destroy_error_state(struct drm_i915_private *dev_priv);
#else
@@ -3365,7 +3570,7 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
{
}
-static inline void i915_destroy_error_state(struct drm_device *dev)
+static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv)
{
}
@@ -3377,7 +3582,6 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
@@ -3385,9 +3589,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
u32 batch_len,
bool is_master);
+/* i915_perf.c */
+extern void i915_perf_init(struct drm_i915_private *dev_priv);
+extern void i915_perf_fini(struct drm_i915_private *dev_priv);
+extern void i915_perf_register(struct drm_i915_private *dev_priv);
+extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
+
/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+extern int i915_save_state(struct drm_i915_private *dev_priv);
+extern int i915_restore_state(struct drm_i915_private *dev_priv);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
@@ -3402,8 +3612,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
bool dp_output, int link_rate);
/* intel_i2c.c */
-extern int intel_setup_gmbus(struct drm_device *dev);
-extern void intel_teardown_gmbus(struct drm_device *dev);
+extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
+extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
unsigned int pin);
@@ -3415,7 +3625,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
-extern void intel_i2c_reset(struct drm_device *dev);
+extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
@@ -3482,6 +3692,7 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)&dev_priv->info;
}
+const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);
@@ -3498,9 +3709,9 @@ extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-extern void intel_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
@@ -3545,7 +3756,7 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
/* intel_dpio_phy.c */
-void bxt_port_to_phy_channel(enum port port,
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
enum port port, u32 margin, u32 scale,
@@ -3812,29 +4023,25 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
+ * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
+ * perform the operation. To check beforehand, pass in the parameters to
+ * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
+ * you only need to pass in the minor offsets; page-aligned pointers are
+ * always valid.
+ *
+ * To check only for SSE4.1, when you know in advance that the future use
+ * will be correctly aligned, just use i915_has_memcpy_from_wc().
+ */
+#define i915_can_memcpy_from_wc(dst, src, len) \
+ i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
+
+#define i915_has_memcpy_from_wc() \
+ i915_memcpy_from_wc(NULL, NULL, 0)
+
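A hedged usage sketch of the two macros (example_copy() and its locals are illustrative):

static void example_copy(void *dst, const void *src, unsigned long len)
{
	/* The alignment check only inspects the low 4 bits of each argument */
	if (i915_can_memcpy_from_wc(dst, src, len))
		i915_memcpy_from_wc(dst, src, len);
	else
		memcpy(dst, src, len); /* unaligned, or no SSE4.1 */
}
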
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
-#define ptr_mask_bits(ptr) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v & PAGE_MASK); \
-})
-
-#define ptr_unpack_bits(ptr, bits) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (bits) = __v & ~PAGE_MASK; \
- (typeof(ptr))(__v & PAGE_MASK); \
-})
-
-#define ptr_pack_bits(ptr, bits) \
- ((typeof(ptr))((unsigned long)(ptr) | (bits)))
-
-#define fetch_and_zero(ptr) ({ \
- typeof(*ptr) __T = *(ptr); \
- *(ptr) = (typeof(*ptr))0; \
- __T; \
-})
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24b5b046754b..88f3628b4e29 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,6 +38,7 @@
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
@@ -68,11 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
- size, 0, -1,
- 0, ggtt->mappable_end,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
+ return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+ size, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
}
static void
@@ -440,7 +440,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
timeout = i915_gem_object_wait_fence(shared[i],
flags, timeout,
rps);
- if (timeout <= 0)
+ if (timeout < 0)
break;
dma_fence_put(shared[i]);
@@ -453,7 +453,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
excl = reservation_object_get_excl_rcu(resv);
}
- if (excl && timeout > 0)
+ if (excl && timeout >= 0)
timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
dma_fence_put(excl);
@@ -612,9 +612,8 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
-void *i915_gem_object_alloc(struct drm_device *dev)
+void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
@@ -626,7 +625,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
static int
i915_gem_create(struct drm_file *file,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
uint64_t size,
uint32_t *handle_p)
{
@@ -639,7 +638,7 @@ i915_gem_create(struct drm_file *file,
return -EINVAL;
/* Allocate the new object */
- obj = i915_gem_object_create(dev, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -661,7 +660,7 @@ i915_gem_dumb_create(struct drm_file *file,
/* have to work out size/pitch and return them */
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
- return i915_gem_create(file, dev,
+ return i915_gem_create(file, to_i915(dev),
args->size, &args->handle);
}
@@ -675,11 +674,12 @@ int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_create *args = data;
- i915_gem_flush_free_objects(to_i915(dev));
+ i915_gem_flush_free_objects(dev_priv);
- return i915_gem_create(file, dev,
+ return i915_gem_create(file, dev_priv,
args->size, &args->handle);
}
@@ -1114,8 +1114,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
/* Bounds check source. */
- if (args->offset > obj->base.size ||
- args->size > obj->base.size - args->offset) {
+ if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
ret = -EINVAL;
goto out;
}
@@ -1428,8 +1427,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
/* Bounds check destination. */
- if (args->offset > obj->base.size ||
- args->size > obj->base.size - args->offset) {
+ if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
ret = -EINVAL;
goto err;
}
@@ -1491,7 +1489,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!i915_vma_is_ggtt(vma))
- continue;
+ break;
if (i915_vma_is_active(vma))
continue;
@@ -1696,12 +1694,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
- u64 size;
-
- size = i915_gem_object_get_stride(obj);
- size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
-
- return size >> PAGE_SHIFT;
+ return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
/**
@@ -1754,6 +1747,29 @@ int i915_gem_mmap_gtt_version(void)
return 1;
}
+static inline struct i915_ggtt_view
+compute_partial_view(struct drm_i915_gem_object *obj,
+ pgoff_t page_offset,
+ unsigned int chunk)
+{
+ struct i915_ggtt_view view;
+
+ if (i915_gem_object_is_tiled(obj))
+ chunk = roundup(chunk, tile_row_pages(obj));
+
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.partial.offset = rounddown(page_offset, chunk);
+ view.partial.size =
+ min_t(unsigned int, chunk,
+ (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
+
+ /* If the partial covers the entire object, just create a normal VMA. */
+ if (chunk >= obj->base.size >> PAGE_SHIFT)
+ view.type = I915_GGTT_VIEW_NORMAL;
+
+ return view;
+}
+
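A worked example with assumed numbers may help here:

/*
 * Worked example (assumed numbers): an untiled 100-page object with
 * chunk = 32 and a fault at page_offset = 70 yields
 *   partial.offset = rounddown(70, 32) = 64
 *   partial.size   = min(32, 100 - 64) = 32
 * and stays a partial view, since chunk (32) < 100 pages; only when
 * chunk covers the whole object does it fall back to a normal view.
 */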
/**
* i915_gem_fault - fault a page into the GTT
* @area: CPU VMA in question
@@ -1830,26 +1846,9 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
if (IS_ERR(vma)) {
- struct i915_ggtt_view view;
- unsigned int chunk_size;
-
/* Use a partial view if it is bigger than available space */
- chunk_size = MIN_CHUNK_PAGES;
- if (i915_gem_object_is_tiled(obj))
- chunk_size = roundup(chunk_size, tile_row_pages(obj));
-
- memset(&view, 0, sizeof(view));
- view.type = I915_GGTT_VIEW_PARTIAL;
- view.params.partial.offset = rounddown(page_offset, chunk_size);
- view.params.partial.size =
- min_t(unsigned int, chunk_size,
- vma_pages(area) - view.params.partial.offset);
-
- /* If the partial covers the entire object, just create a
- * normal VMA.
- */
- if (chunk_size >= obj->base.size >> PAGE_SHIFT)
- view.type = I915_GGTT_VIEW_NORMAL;
+ struct i915_ggtt_view view =
+ compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
/* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
@@ -1878,7 +1877,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
+ area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
@@ -2029,91 +2028,27 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
}
}
-/**
- * i915_gem_get_ggtt_size - return required global GTT size for an object
- * @dev_priv: i915 device
- * @size: object size
- * @tiling_mode: tiling mode
- *
- * Return the required global GTT size for an object, taking into account
- * potential fence register mapping.
- */
-u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
- u64 size, int tiling_mode)
-{
- u64 ggtt_size;
-
- GEM_BUG_ON(size == 0);
-
- if (INTEL_GEN(dev_priv) >= 4 ||
- tiling_mode == I915_TILING_NONE)
- return size;
-
- /* Previous chips need a power-of-two fence region when tiling */
- if (IS_GEN3(dev_priv))
- ggtt_size = 1024*1024;
- else
- ggtt_size = 512*1024;
-
- while (ggtt_size < size)
- ggtt_size <<= 1;
-
- return ggtt_size;
-}
-
-/**
- * i915_gem_get_ggtt_alignment - return required global GTT alignment
- * @dev_priv: i915 device
- * @size: object size
- * @tiling_mode: tiling mode
- * @fenced: is fenced alignment required or not
- *
- * Return the required global GTT alignment for an object, taking into account
- * potential fence register mapping.
- */
-u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode, bool fenced)
-{
- GEM_BUG_ON(size == 0);
-
- /*
- * Minimum alignment is 4k (GTT page size), but might be greater
- * if a fence register is needed for the object.
- */
- if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
- tiling_mode == I915_TILING_NONE)
- return 4096;
-
- /*
- * Previous chips need to be aligned to the size of the smallest
- * fence register that can contain the object.
- */
- return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
-}
-
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int err;
err = drm_gem_create_mmap_offset(&obj->base);
- if (!err)
+ if (likely(!err))
return 0;
- /* We can idle the GPU locklessly to flush stale objects, but in order
- * to claim that space for ourselves, we need to take the big
- * struct_mutex to free the requests+objects and allocate our slot.
- */
- err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
- if (err)
- return err;
+ /* Attempt to reap some mmap space from dead objects */
+ do {
+ err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+ if (err)
+ break;
- err = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (!err) {
- i915_gem_retire_requests(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
err = drm_gem_create_mmap_offset(&obj->base);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
+ if (!err)
+ break;
+
+ } while (flush_delayed_work(&dev_priv->gt.retire_work));
return err;
}
@@ -2306,6 +2241,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
/* called before being DMA mapped, no need to copy sg->dma_* */
new_sg = sg_next(new_sg);
}
+ GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
sg_free_table(orig_st);
@@ -2627,35 +2563,34 @@ err_unlock:
goto out_unlock;
}
-static bool i915_context_is_banned(const struct i915_gem_context *ctx)
+static bool ban_context(const struct i915_gem_context *ctx)
{
- unsigned long elapsed;
+ return (i915_gem_context_is_bannable(ctx) &&
+ ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD);
+}
- if (ctx->hang_stats.banned)
- return true;
+static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
+{
+ ctx->guilty_count++;
+ ctx->ban_score += CONTEXT_SCORE_GUILTY;
+ if (ban_context(ctx))
+ i915_gem_context_set_banned(ctx);
- elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
- if (ctx->hang_stats.ban_period_seconds &&
- elapsed <= ctx->hang_stats.ban_period_seconds) {
- DRM_DEBUG("context hanging too fast, banning!\n");
- return true;
- }
+ DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
+ ctx->name, ctx->ban_score,
+ yesno(i915_gem_context_is_banned(ctx)));
+
+ if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv))
+ return;
- return false;
+ ctx->file_priv->context_bans++;
+ DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
+ ctx->name, ctx->file_priv->context_bans);
}
-static void i915_set_reset_status(struct i915_gem_context *ctx,
- const bool guilty)
+static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
- struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
-
- if (guilty) {
- hs->banned = i915_context_is_banned(ctx);
- hs->batch_active++;
- hs->guilty_ts = get_seconds();
- } else {
- hs->batch_pending++;
- }
+ ctx->active_count++;
}
struct drm_i915_gem_request *
@@ -2675,13 +2610,52 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
if (__i915_gem_request_completed(request))
continue;
+ GEM_BUG_ON(request->engine != engine);
return request;
}
return NULL;
}
-static void reset_request(struct drm_i915_gem_request *request)
+static bool engine_stalled(struct intel_engine_cs *engine)
+{
+ if (!engine->hangcheck.stalled)
+ return false;
+
+ /* Check for possible seqno movement after hang declaration */
+ if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
+ DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
+ return false;
+ }
+
+ return true;
+}
+
+int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Ensure the irq handler finishes, and is not run again. */
+ for_each_engine(engine, dev_priv, id) {
+ struct drm_i915_gem_request *request;
+
+ tasklet_kill(&engine->irq_tasklet);
+
+ if (engine_stalled(engine)) {
+ request = i915_gem_find_active_request(engine);
+ if (request && request->fence.error == -EIO)
+ err = -EIO; /* Previous reset failed! */
+ }
+ }
+
+ i915_gem_revoke_fences(dev_priv);
+
+ return err;
+}
+
+static void skip_request(struct drm_i915_gem_request *request)
{
void *vaddr = request->ring->vaddr;
u32 head;
@@ -2696,66 +2670,93 @@ static void reset_request(struct drm_i915_gem_request *request)
head = 0;
}
memset(vaddr + head, 0, request->postfix - head);
+
+ dma_fence_set_error(&request->fence, -EIO);
}
-static void i915_gem_reset_engine(struct intel_engine_cs *engine)
+static void engine_skip_context(struct drm_i915_gem_request *request)
{
- struct drm_i915_gem_request *request;
- struct i915_gem_context *incomplete_ctx;
+ struct intel_engine_cs *engine = request->engine;
+ struct i915_gem_context *hung_ctx = request->ctx;
struct intel_timeline *timeline;
unsigned long flags;
- bool ring_hung;
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
+ timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
- request = i915_gem_find_active_request(engine);
- if (!request)
- return;
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock(&timeline->lock);
- ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
- if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
- ring_hung = false;
+ list_for_each_entry_continue(request, &engine->timeline->requests, link)
+ if (request->ctx == hung_ctx)
+ skip_request(request);
- i915_set_reset_status(request->ctx, ring_hung);
- if (!ring_hung)
- return;
+ list_for_each_entry(request, &timeline->requests, link)
+ skip_request(request);
- DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->global_seqno);
+ spin_unlock(&timeline->lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
- /* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset_hw(engine, request);
+/* Returns true if the request was guilty of hang */
+static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
+{
+ /* Read once and return the resolution */
+ const bool guilty = engine_stalled(request->engine);
- /* Users of the default context do not rely on logical state
- * preserved between batches. They have to emit full state on
- * every batch and so it is safe to execute queued requests following
- * the hang.
+ /* The guilty request will get skipped on a hung engine.
+ *
+ * Users of client default contexts do not rely on logical
+ * state preserved between batches, so it is safe to execute
+ * queued requests following the hang. Non-default contexts
+ * rely on preserved state, so skipping a batch loses the
+ * evolution of the state and it needs to be considered corrupted.
+ * Executing more queued batches on top of corrupted state is
+ * risky. But we take the risk by trying to advance through
+ * the queued requests in order to make the client behaviour
+ * more predictable around resets, by not throwing away a random
+ * number of batches it has prepared for execution. Sophisticated
+ * clients can use gem_reset_stats_ioctl and dma fence status
+ * (exported via the sync_file info ioctl on explicit fences) to observe
+ * when they lose the context state and should rebuild accordingly.
*
- * Other contexts preserve state, now corrupt. We want to skip all
- * queued requests that reference the corrupt context.
+ * The context ban, and ultimately the client ban, mechanisms are safety
+ * valves if client submission ends up resulting in nothing more than
+ * subsequent hangs.
*/
- incomplete_ctx = request->ctx;
- if (i915_gem_context_is_default(incomplete_ctx))
- return;
- timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+ if (guilty) {
+ i915_gem_context_mark_guilty(request->ctx);
+ skip_request(request);
+ } else {
+ i915_gem_context_mark_innocent(request->ctx);
+ dma_fence_set_error(&request->fence, -EAGAIN);
+ }
- spin_lock_irqsave(&engine->timeline->lock, flags);
- spin_lock(&timeline->lock);
+ return guilty;
+}
- list_for_each_entry_continue(request, &engine->timeline->requests, link)
- if (request->ctx == incomplete_ctx)
- reset_request(request);
+static void i915_gem_reset_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *request;
- list_for_each_entry(request, &timeline->requests, link)
- reset_request(request);
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
- spin_unlock(&timeline->lock);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ request = i915_gem_find_active_request(engine);
+ if (request && i915_gem_reset_request(request)) {
+ DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
+ engine->name, request->global_seqno);
+
+ /* If this context is now banned, skip all pending requests. */
+ if (i915_gem_context_is_banned(request->ctx))
+ engine_skip_context(request);
+ }
+
+ /* Setup the CS to resume from the breadcrumb of the hung request */
+ engine->reset_hw(engine, request);
}
-void i915_gem_reset(struct drm_i915_private *dev_priv)
+void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -2779,14 +2780,30 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ dma_fence_set_error(&request->fence, -EIO);
i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
}
-static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
+static void engine_set_wedged(struct intel_engine_cs *engine)
{
+ struct drm_i915_gem_request *request;
+ unsigned long flags;
+
+ /* We need to be sure that no thread is running the old callback as
+ * we install the nop handler (otherwise we would submit a request
+ * to hardware that will never complete). In order to prevent this
+ * race, we wait until the machine is idle before making the swap
+ * (using stop_machine()).
+ */
engine->submit_request = nop_submit_request;
+ /* Mark all executing requests as skipped */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ list_for_each_entry(request, &engine->timeline->requests, link)
+ dma_fence_set_error(&request->fence, -EIO);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
/* Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
@@ -2815,20 +2832,29 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
}
}
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
+static int __i915_gem_set_wedged_BKL(void *data)
{
+ struct drm_i915_private *i915 = data;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ for_each_engine(engine, i915, id)
+ engine_set_wedged(engine);
+
+ return 0;
+}
+
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
+{
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
- i915_gem_context_lost(dev_priv);
- for_each_engine(engine, dev_priv, id)
- i915_gem_cleanup_engine(engine);
- mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
+ stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
+ i915_gem_context_lost(dev_priv);
i915_gem_retire_requests(dev_priv);
+
+ mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
}
static void
@@ -3373,7 +3399,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
- int ret;
+ int ret = 0;
switch (args->caching) {
case I915_CACHING_NONE:
@@ -3398,20 +3424,29 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = i915_mutex_lock_interruptible(dev);
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->cache_level == level)
+ goto out;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
- return ret;
+ goto out;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj) {
- ret = -ENOENT;
- goto unlock;
- }
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto out;
ret = i915_gem_object_set_cache_level(obj, level);
- i915_gem_object_put(obj);
-unlock:
mutex_unlock(&dev->struct_mutex);
+
+out:
+ i915_gem_object_put(obj);
return ret;
}
@@ -3461,7 +3496,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* try to preserve the existing ABI).
*/
vma = ERR_PTR(-ENOSPC);
- if (view->type == I915_GGTT_VIEW_NORMAL)
+ if (!view || view->type == I915_GGTT_VIEW_NORMAL)
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
PIN_MAPPABLE | PIN_NONBLOCK);
if (IS_ERR(vma)) {
@@ -3514,17 +3549,16 @@ err_unpin_display:
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(vma->obj->pin_display == 0))
return;
if (--vma->obj->pin_display == 0)
- vma->display_alignment = 0;
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */
- if (!i915_vma_is_active(vma))
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ i915_gem_object_bump_inactive_ggtt(vma->obj);
i915_vma_unpin(vma);
}
@@ -3655,8 +3689,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
- if (IS_ERR(vma))
+ vma = i915_vma_instance(obj, vm, view);
+ if (unlikely(IS_ERR(vma)))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
@@ -3665,10 +3699,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE) {
- u32 fence_size;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
- i915_gem_object_get_tiling(obj));
/* If the required space is larger than the available
* aperture, we will not able to find a slot for the
* object and unbinding the object now will be in
@@ -3676,7 +3706,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
- if (fence_size > dev_priv->ggtt.mappable_end)
+ if (vma->fence_size > dev_priv->ggtt.mappable_end)
return ERR_PTR(-E2BIG);
/* If NONBLOCK is set the caller is optimistically
@@ -3695,7 +3725,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
- fence_size > dev_priv->ggtt.mappable_end / 2)
+ vma->fence_size > dev_priv->ggtt.mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
@@ -3948,14 +3978,9 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt,
};
-/* Note we don't consider signbits :| */
-#define overflows_type(x, T) \
- (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
-
struct drm_i915_gem_object *
-i915_gem_object_create(struct drm_device *dev, u64 size)
+i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
@@ -3972,16 +3997,16 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(dev);
+ obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_object_init(dev, &obj->base, size);
+ ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
if (ret)
goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
- if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
+ if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
/* 965gm cannot relocate objects above 4GiB. */
mask &= ~__GFP_HIGHMEM;
mask |= __GFP_DMA32;
@@ -4174,12 +4199,13 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id)
- GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
+ GEM_BUG_ON(engine->last_retired_context &&
+ !i915_gem_context_is_kernel(engine->last_retired_context));
}
-int i915_gem_suspend(struct drm_device *dev)
+int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
int ret;
intel_suspend_gt_powersave(dev_priv);
@@ -4213,8 +4239,14 @@ int i915_gem_suspend(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
- flush_delayed_work(&dev_priv->gt.idle_work);
- flush_work(&dev_priv->mm.free_work);
+
+ /* As the idle_work may rearm itself if it detects a race, play safe
+ * and repeat the flush until it is definitely idle.
+ */
+ while (flush_delayed_work(&dev_priv->gt.idle_work))
+ ;
+
+ i915_gem_drain_freed_objects(dev_priv);
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
@@ -4253,9 +4285,9 @@ err:
return ret;
}
-void i915_gem_resume(struct drm_device *dev)
+void i915_gem_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
WARN_ON(dev_priv->gt.awake);
@@ -4320,9 +4352,8 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
}
int
-i915_gem_init_hw(struct drm_device *dev)
+i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
@@ -4376,10 +4407,10 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
}
- intel_mocs_init_l3cc_table(dev);
+ intel_mocs_init_l3cc_table(dev_priv);
/* We can't enable contexts until all firmware is loaded */
- ret = intel_guc_setup(dev);
+ ret = intel_guc_setup(dev_priv);
if (ret)
goto out;
@@ -4409,12 +4440,11 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
return true;
}
-int i915_gem_init(struct drm_device *dev)
+int i915_gem_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
if (!i915.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
@@ -4438,15 +4468,15 @@ int i915_gem_init(struct drm_device *dev)
if (ret)
goto out_unlock;
- ret = i915_gem_context_init(dev);
+ ret = i915_gem_context_init(dev_priv);
if (ret)
goto out_unlock;
- ret = intel_engines_init(dev);
+ ret = intel_engines_init(dev_priv);
if (ret)
goto out_unlock;
- ret = i915_gem_init_hw(dev);
+ ret = i915_gem_init_hw(dev_priv);
if (ret == -EIO) {
/* Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
@@ -4459,15 +4489,14 @@ int i915_gem_init(struct drm_device *dev)
out_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
void
-i915_gem_cleanup_engines(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -4483,8 +4512,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
dev_priv->num_fence_regs = 32;
- else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
- IS_I945GM(dev_priv) || IS_G33(dev_priv))
+ else if (INTEL_INFO(dev_priv)->gen >= 4 ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
@@ -4507,9 +4537,8 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
}
int
-i915_gem_load_init(struct drm_device *dev)
+i915_gem_load_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int err = -ENOMEM;
dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
@@ -4578,10 +4607,8 @@ err_out:
return err;
}
-void i915_gem_load_cleanup(struct drm_device *dev)
+void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
WARN_ON(!llist_empty(&dev_priv->mm.free_list));
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -4732,7 +4759,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
-i915_gem_object_create_from_data(struct drm_device *dev,
+i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
const void *data, size_t size)
{
struct drm_i915_gem_object *obj;
@@ -4740,7 +4767,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
size_t bytes;
int ret;
- obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+ obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
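The overflows_type() check retained above in i915_gem_object_create() still guards against truncation; only the macro definition has moved out of i915_gem.c into a shared i915 header (the destination header is an assumption here). A minimal sketch of its semantics:

    /* Sketch of the helper removed above; sign bits are deliberately
     * ignored, as the original comment notes.
     */
    #define overflows_type(x, T) \
            (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))

    /* e.g. on a 32-bit build, obj->base.size is a 32-bit size_t, so a
     * 5GiB request is rejected with -E2BIG before any allocation:
     */
    u64 size = 5ull << 30;
    size_t dst_size;
    if (overflows_type(size, dst_size))     /* true: 5GiB >> 32 != 0 */
            return ERR_PTR(-E2BIG);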
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 51ec793f2e20..a585d47c420a 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -27,8 +27,10 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
+#define GEM_WARN_ON(expr) WARN_ON(expr)
#else
-#define GEM_BUG_ON(expr) do { } while (0)
+#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
#endif
#define I915_NUM_ENGINES 5
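Replacing the empty do { } while (0) stub with BUILD_BUG_ON_INVALID() means the asserted expression is still parsed and type-checked in non-debug builds while compiling to nothing, so assertions cannot silently rot when CONFIG_DRM_I915_DEBUG_GEM is off. A sketch, assuming the stock kernel definition from <linux/bug.h>:

    /* Assumed definition; the expression only appears inside sizeof(),
     * so it generates no code but still surfaces type errors and
     * warnings for a malformed condition.
     */
    #define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))

    /* GEM_WARN_ON() additionally yields 0 via the comma operator, so
     * callers may keep writing:
     */
    if (GEM_WARN_ON(!engine))   /* branch folds away when !DEBUG_GEM */
            return -ENODEV;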
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 1f94b8d6d83d..17f90c618208 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,7 +97,7 @@
* part. It should be safe to decrease this, but it's more future-proof as is.
*/
#define GEN6_CONTEXT_ALIGN (64<<10)
-#define GEN7_CONTEXT_ALIGN 4096
+#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
@@ -141,7 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
trace_i915_context_free(ctx);
- GEM_BUG_ON(!ctx->closed);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
i915_ppgtt_put(ctx->ppgtt);
@@ -166,15 +166,15 @@ void i915_gem_context_free(struct kref *ctx_ref)
kfree(ctx);
}
-struct drm_i915_gem_object *
-i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+static struct drm_i915_gem_object *
+alloc_context_obj(struct drm_i915_private *dev_priv, u64 size)
{
struct drm_i915_gem_object *obj;
int ret;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- obj = i915_gem_object_create(dev, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return obj;
@@ -193,7 +193,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
- if (IS_IVYBRIDGE(to_i915(dev))) {
+ if (IS_IVYBRIDGE(dev_priv)) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@@ -205,31 +205,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj;
}
-static void i915_ppgtt_close(struct i915_address_space *vm)
-{
- struct list_head *phases[] = {
- &vm->active_list,
- &vm->inactive_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
-
- GEM_BUG_ON(vm->closed);
- vm->closed = true;
-
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
-
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- if (!i915_vma_is_closed(vma))
- i915_vma_close(vma);
- }
-}
-
static void context_close(struct i915_gem_context *ctx)
{
- GEM_BUG_ON(ctx->closed);
- ctx->closed = true;
+ i915_gem_context_set_closed(ctx);
if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->base);
ctx->file_priv = ERR_PTR(-EBADF);
@@ -259,10 +237,9 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
}
static struct i915_gem_context *
-__create_hw_context(struct drm_device *dev,
+__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *ctx;
int ret;
@@ -286,14 +263,13 @@ __create_hw_context(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = i915_gem_alloc_context_obj(dev,
- dev_priv->hw_context_size);
+ obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_out;
}
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
ret = PTR_ERR(vma);
@@ -331,12 +307,21 @@ __create_hw_context(struct drm_device *dev,
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
- ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+ i915_gem_context_set_bannable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
+ /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
+ * present or not in use we still need a small bias as ring wraparound
+ * at offset 0 sometimes hangs. No idea why.
+ */
+ if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
+ ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
+ else
+ ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
+
return ctx;
err_pid:
@@ -353,21 +338,21 @@ err_out:
* well as an idle case.
*/
static struct i915_gem_context *
-i915_gem_create_context(struct drm_device *dev,
+i915_gem_create_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- ctx = __create_hw_context(dev, file_priv);
+ ctx = __create_hw_context(dev_priv, file_priv);
if (IS_ERR(ctx))
return ctx;
- if (USES_FULL_PPGTT(dev)) {
+ if (USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
+ ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -407,35 +392,24 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
- ctx = i915_gem_create_context(dev, NULL);
+ ctx = __create_hw_context(to_i915(dev), NULL);
if (IS_ERR(ctx))
goto out;
- ctx->execlists_force_single_submission = true;
+ ctx->file_priv = ERR_PTR(-EBADF);
+ i915_gem_context_set_closed(ctx); /* not user accessible */
+ i915_gem_context_clear_bannable(ctx);
+ i915_gem_context_set_force_single_submission(ctx);
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+
+ GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
mutex_unlock(&dev->struct_mutex);
return ctx;
}
-static void i915_gem_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- if (i915.enable_execlists) {
- intel_lr_context_unpin(ctx, engine);
- } else {
- struct intel_context *ce = &ctx->engine[engine->id];
-
- if (ce->state)
- i915_vma_unpin(ce->state);
-
- i915_gem_context_put(ctx);
- }
-}
-
-int i915_gem_context_init(struct drm_device *dev)
+int i915_gem_context_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *ctx;
/* Init should only be called once per module load. Eventually the
@@ -461,7 +435,8 @@ int i915_gem_context_init(struct drm_device *dev)
dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev_priv)) {
dev_priv->hw_context_size =
- round_up(get_context_size(dev_priv), 4096);
+ round_up(get_context_size(dev_priv),
+ I915_GTT_PAGE_SIZE);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
@@ -469,16 +444,19 @@ int i915_gem_context_init(struct drm_device *dev)
}
}
- ctx = i915_gem_create_context(dev, NULL);
+ ctx = i915_gem_create_context(dev_priv, NULL);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
PTR_ERR(ctx));
return PTR_ERR(ctx);
}
+ i915_gem_context_clear_bannable(ctx);
ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
dev_priv->hw_context_size ? "HW" : "fake");
@@ -493,10 +471,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv, id) {
- if (engine->last_context) {
- i915_gem_context_unpin(engine->last_context, engine);
- engine->last_context = NULL;
- }
+ engine->legacy_active_context = NULL;
+
+ if (!engine->last_retired_context)
+ continue;
+
+ engine->context_unpin(engine, engine->last_retired_context);
+ engine->last_retired_context = NULL;
}
/* Force the GPU state to be restored on enabling */
@@ -522,12 +503,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *dctx = dev_priv->kernel_context;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ GEM_BUG_ON(!i915_gem_context_is_kernel(dctx));
context_close(dctx);
dev_priv->kernel_context = NULL;
@@ -551,9 +533,11 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
idr_init(&file_priv->context_idr);
mutex_lock(&dev->struct_mutex);
- ctx = i915_gem_create_context(dev, file_priv);
+ ctx = i915_gem_create_context(to_i915(dev), file_priv);
mutex_unlock(&dev->struct_mutex);
+ GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+
if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
return PTR_ERR(ctx);
@@ -719,7 +703,7 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
- return to == engine->last_context;
+ return to == engine->legacy_active_context;
}
static bool
@@ -731,11 +715,11 @@ needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
return false;
/* Always load the ppgtt on first use */
- if (!engine->last_context)
+ if (!engine->legacy_active_context)
return true;
/* Same context without new entries, skip */
- if (engine->last_context == to &&
+ if (engine->legacy_active_context == to &&
!(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
@@ -765,57 +749,20 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
return false;
}
-struct i915_vma *
-i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
- unsigned int flags)
-{
- struct i915_vma *vma = ctx->engine[RCS].state;
- int ret;
-
- /* Clear this page out of any CPU caches for coherent swap-in/out.
- * We only want to do this on the first bind so that we do not stall
- * on an active context (which by nature is already on the GPU).
- */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (ret)
- return ERR_PTR(ret);
- }
-
- ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
- if (ret)
- return ERR_PTR(ret);
-
- return vma;
-}
-
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_vma *vma;
- struct i915_gem_context *from;
+ struct i915_gem_context *from = engine->legacy_active_context;
u32 hw_flags;
int ret, i;
+ GEM_BUG_ON(engine->id != RCS);
+
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- /* Trying to pin first makes error handling easier. */
- vma = i915_gem_context_pin_legacy(to, 0);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- /*
- * Pin can switch back to the default context if we end up calling into
- * evict_everything - as a last ditch gtt defrag effort that also
- * switches to the default context. Hence we need to reload from here.
- *
- * XXX: Doing so is painfully broken!
- */
- from = engine->last_context;
-
if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
@@ -824,7 +771,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
trace_switch_mm(engine, to);
ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
- goto err;
+ return ret;
}
if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@@ -841,29 +788,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags);
if (ret)
- goto err;
- }
+ return ret;
- /* The backing object for the context is done after switching to the
- * *next* context. Therefore we cannot retire the previous context until
- * the next context has already started running. In fact, the below code
- * is a bit suboptimal because the retiring can occur simply after the
- * MI_SET_CONTEXT instead of when the next seqno has completed.
- */
- if (from != NULL) {
- /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
- * whole damn pipeline, we don't need to explicitly mark the
- * object dirty. The only exception is that the context must be
- * correct in case the object gets swapped out. Ideally we'd be
- * able to defer doing this until we know the object would be
- * swapped, but there is no way to do that yet.
- */
- i915_vma_move_to_active(from->engine[RCS].state, req, 0);
- /* state is kept alive until the next request */
- i915_vma_unpin(from->engine[RCS].state);
- i915_gem_context_put(from);
+ engine->legacy_active_context = to;
}
- engine->last_context = i915_gem_context_get(to);
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
@@ -904,10 +832,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
}
return 0;
-
-err:
- i915_vma_unpin(vma);
- return ret;
}
/**
@@ -947,18 +871,32 @@ int i915_switch_context(struct drm_i915_gem_request *req)
ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
- if (to != engine->last_context) {
- if (engine->last_context)
- i915_gem_context_put(engine->last_context);
- engine->last_context = i915_gem_context_get(to);
- }
-
return 0;
}
return do_rcs_switch(req);
}
+static bool engine_has_kernel_context(struct intel_engine_cs *engine)
+{
+ struct i915_gem_timeline *timeline;
+
+ list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
+ struct intel_timeline *tl;
+
+ if (timeline == &engine->i915->gt.global_timeline)
+ continue;
+
+ tl = &timeline->engine[engine->id];
+ if (i915_gem_active_peek(&tl->last_request,
+ &engine->i915->drm.struct_mutex))
+ return false;
+ }
+
+ return (!engine->last_retired_context ||
+ i915_gem_context_is_kernel(engine->last_retired_context));
+}
+
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -967,10 +905,15 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ i915_gem_retire_requests(dev_priv);
+
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
+ if (engine_has_kernel_context(engine))
+ continue;
+
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1003,6 +946,11 @@ static bool contexts_enabled(struct drm_device *dev)
return i915.enable_execlists || to_i915(dev)->hw_context_size;
}
+static bool client_is_banned(struct drm_i915_file_private *file_priv)
+{
+ return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
+}
+
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -1017,17 +965,27 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
+ if (client_is_banned(file_priv)) {
+ DRM_DEBUG("client %s[%d] banned from creating ctx\n",
+ current->comm,
+ pid_nr(get_task_pid(current, PIDTYPE_PID)));
+
+ return -EIO;
+ }
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_create_context(dev, file_priv);
+ ctx = i915_gem_create_context(to_i915(dev), file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+
args->ctx_id = ctx->user_handle;
- DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
+ DRM_DEBUG("HW context %d created\n", args->ctx_id);
return 0;
}
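The ban bookkeeping behind this check: each hang a context is found guilty of raises its ban_score by CONTEXT_SCORE_GUILTY (10, per the new header below); once the score passes CONTEXT_SCORE_BAN_THRESHOLD (40) the context is banned, and every banned context increments file_priv->context_bans, which client_is_banned() compares against I915_MAX_CLIENT_CONTEXT_BANS. The scoring itself happens in the hangcheck/reset path outside this excerpt, so the following is an illustrative sketch only:

    /* Illustrative sketch, not the patch's actual reset-path code.
     * Assuming a flat CONTEXT_SCORE_GUILTY bump per guilty hang, a
     * bannable context is banned on its fifth offence (5 * 10 > 40).
     */
    static void note_guilty_hang(struct i915_gem_context *ctx)
    {
            ctx->guilty_count++;
            ctx->ban_score += CONTEXT_SCORE_GUILTY;
            if (ctx->ban_score > CONTEXT_SCORE_BAN_THRESHOLD &&
                i915_gem_context_is_bannable(ctx)) {
                    i915_gem_context_set_banned(ctx);
                    if (!IS_ERR_OR_NULL(ctx->file_priv))
                            ctx->file_priv->context_bans++;
            }
    }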
@@ -1060,7 +1018,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
context_close(ctx);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
+ DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
return 0;
}
@@ -1085,7 +1043,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->size = 0;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
- args->value = ctx->hang_stats.ban_period_seconds;
+ ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
@@ -1099,7 +1057,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = to_i915(dev)->ggtt.base.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
- args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
+ args->value = i915_gem_context_no_error_capture(ctx);
+ break;
+ case I915_CONTEXT_PARAM_BANNABLE:
+ args->value = i915_gem_context_is_bannable(ctx);
break;
default:
ret = -EINVAL;
@@ -1130,13 +1091,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
- if (args->size)
- ret = -EINVAL;
- else if (args->value < ctx->hang_stats.ban_period_seconds &&
- !capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else
- ctx->hang_stats.ban_period_seconds = args->value;
+ ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size) {
@@ -1147,14 +1102,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
}
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
- if (args->size) {
+ if (args->size)
ret = -EINVAL;
- } else {
- if (args->value)
- ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
- else
- ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
- }
+ else if (args->value)
+ i915_gem_context_set_no_error_capture(ctx);
+ else
+ i915_gem_context_clear_no_error_capture(ctx);
+ break;
+ case I915_CONTEXT_PARAM_BANNABLE:
+ if (args->size)
+ ret = -EINVAL;
+ else if (!capable(CAP_SYS_ADMIN) && !args->value)
+ ret = -EPERM;
+ else if (args->value)
+ i915_gem_context_set_bannable(ctx);
+ else
+ i915_gem_context_clear_bannable(ctx);
break;
default:
ret = -EINVAL;
@@ -1170,7 +1133,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reset_stats *args = data;
- struct i915_ctx_hang_stats *hs;
struct i915_gem_context *ctx;
int ret;
@@ -1189,15 +1151,14 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
- hs = &ctx->hang_stats;
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;
- args->batch_active = hs->batch_active;
- args->batch_pending = hs->batch_pending;
+ args->batch_active = ctx->guilty_count;
+ args->batch_pending = ctx->active_count;
mutex_unlock(&dev->struct_mutex);
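Userspace observes these counters through the existing reset-stats ioctl, whose batch_active/batch_pending fields now map onto guilty_count/active_count. A minimal query, as a sketch (DRM_IOCTL_I915_GET_RESET_STATS and struct drm_i915_reset_stats are long-standing UAPI; the device path and libdrm include layout are assumptions):

    #include <fcntl.h>
    #include <stdio.h>
    #include <xf86drm.h>      /* drmIoctl(), from libdrm */
    #include <i915_drm.h>     /* via libdrm's include path */

    int main(void)
    {
            struct drm_i915_reset_stats stats = { .ctx_id = 0 /* default ctx */ };
            int fd = open("/dev/dri/card0", O_RDWR);  /* assumed node */

            if (fd < 0 || drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
                    return 1;
            /* batch_active <- ctx->guilty_count, batch_pending <- ctx->active_count */
            printf("resets=%u guilty=%u innocent=%u\n",
                   stats.reset_count, stats.batch_active, stats.batch_pending);
            return 0;
    }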
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
new file mode 100644
index 000000000000..0ac750b90f3d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_CONTEXT_H__
+#define __I915_GEM_CONTEXT_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+
+struct pid;
+
+struct drm_device;
+struct drm_file;
+
+struct drm_i915_private;
+struct drm_i915_file_private;
+struct i915_hw_ppgtt;
+struct i915_vma;
+struct intel_ring;
+
+#define DEFAULT_CONTEXT_HANDLE 0
+
+/**
+ * struct i915_gem_context - client state
+ *
+ * The struct i915_gem_context represents the combined view of the driver and
+ * logical hardware state for a particular client.
+ */
+struct i915_gem_context {
+ /** i915: i915 device backpointer */
+ struct drm_i915_private *i915;
+
+ /** file_priv: owning file descriptor */
+ struct drm_i915_file_private *file_priv;
+
+ /**
+ * @ppgtt: unique address space (GTT)
+ *
+ * In full-ppgtt mode, each context has its own address space ensuring
+ * complete separation of one client from all others.
+ *
+ * In other modes, this is a NULL pointer with the expectation that
+ * the caller uses the shared global GTT.
+ */
+ struct i915_hw_ppgtt *ppgtt;
+
+ /**
+ * @pid: process id of creator
+ *
+ * Note that the process which created the context may not be its
+ * principal user, as the context may be shared across a local socket.
+ * However, that should only affect the default context; all contexts
+ * created explicitly by the client are expected to be isolated.
+ */
+ struct pid *pid;
+
+ /**
+ * @name: arbitrary name
+ *
+ * A name is constructed for the context from the creator's process
+ * name, pid and user handle in order to uniquely identify the
+ * context in messages.
+ */
+ const char *name;
+
+ /** link: place within &drm_i915_private.context_list */
+ struct list_head link;
+
+ /**
+ * @ref: reference count
+ *
+ * A reference to a context is held both by the client that created it
+ * and by each request submitted to the hardware using it (to ensure
+ * the hardware has access to the state until it has finished all
+ * pending writes). See i915_gem_context_get() and
+ * i915_gem_context_put() for access.
+ */
+ struct kref ref;
+
+ /**
+ * @flags: small set of booleans
+ */
+ unsigned long flags;
+#define CONTEXT_NO_ZEROMAP BIT(0)
+#define CONTEXT_NO_ERROR_CAPTURE 1
+#define CONTEXT_CLOSED 2
+#define CONTEXT_BANNABLE 3
+#define CONTEXT_BANNED 4
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
+
+ /**
+ * @hw_id: unique identifier for the context
+ *
+ * The hardware needs to uniquely identify the context for a few
+ * functions like fault reporting, PASID, scheduling. The
+ * &drm_i915_private.context_hw_ida is used to assign a unique
+ * id for the lifetime of the context.
+ */
+ unsigned int hw_id;
+
+ /**
+ * @user_handle: userspace identifier
+ *
+ * A unique per-file identifier is generated from
+ * &drm_i915_file_private.contexts.
+ */
+ u32 user_handle;
+
+ /**
+ * @priority: execution and service priority
+ *
+ * All clients are equal, but some are more equal than others!
+ *
+ * Requests from a context with a greater (more positive) value of
+ * @priority will be executed before those with a lower @priority
+ * value, forming a simple QoS.
+ *
+ * The &drm_i915_private.kernel_context is assigned the lowest priority.
+ */
+ int priority;
+
+ /** ggtt_alignment: alignment restriction for context objects */
+ u32 ggtt_alignment;
+ /** ggtt_offset_bias: placement restriction for context objects */
+ u32 ggtt_offset_bias;
+
+ /** engine: per-engine logical HW state */
+ struct intel_context {
+ struct i915_vma *state;
+ struct intel_ring *ring;
+ u32 *lrc_reg_state;
+ u64 lrc_desc;
+ int pin_count;
+ bool initialised;
+ } engine[I915_NUM_ENGINES];
+
+ /** ring_size: size for allocating the per-engine ring buffer */
+ u32 ring_size;
+ /** desc_template: invariant fields for the HW context descriptor */
+ u32 desc_template;
+
+ /** status_notifier: list of callbacks for context-switch changes */
+ struct atomic_notifier_head status_notifier;
+
+ /** guilty_count: How many times this context has caused a GPU hang. */
+ unsigned int guilty_count;
+ /**
+ * @active_count: How many times this context was active during a GPU
+ * hang, but did not cause it.
+ */
+ unsigned int active_count;
+
+#define CONTEXT_SCORE_GUILTY 10
+#define CONTEXT_SCORE_BAN_THRESHOLD 40
+ /** ban_score: Accumulated score of all hangs caused by this context. */
+ int ban_score;
+
+ /** remap_slice: Bitmask of cache lines that need remapping */
+ u8 remap_slice;
+};
+
+static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_CLOSED, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(i915_gem_context_is_closed(ctx));
+ __set_bit(CONTEXT_CLOSED, &ctx->flags);
+}
+
+static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
+{
+ __set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+}
+
+static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
+{
+ __clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+}
+
+static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_BANNABLE, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
+{
+ __set_bit(CONTEXT_BANNABLE, &ctx->flags);
+}
+
+static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
+{
+ __clear_bit(CONTEXT_BANNABLE, &ctx->flags);
+}
+
+static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_BANNED, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
+{
+ __set_bit(CONTEXT_BANNED, &ctx->flags);
+}
+
+static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
+{
+ __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
+}
+
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
+{
+ return c->user_handle == DEFAULT_CONTEXT_HANDLE;
+}
+
+static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
+{
+ return !ctx->file_priv;
+}
+
+/* i915_gem_context.c */
+int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
+void i915_gem_context_fini(struct drm_i915_private *dev_priv);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+void i915_gem_context_free(struct kref *ctx_ref);
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev);
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif /* !__I915_GEM_CONTEXT_H__ */
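Taken together, these helpers replace the old scattered booleans (ctx->closed, hang_stats.banned, execlists_force_single_submission) with bitops on a single flags word. A sketch of how a caller composes them, mirroring the GVT context setup earlier in this patch:

    /* Sketch mirroring i915_gem_context_create_gvt(): a context hidden
     * from userspace, exempt from banning and forced to single
     * submission, expressed purely through the new flag helpers.
     */
    static void mark_internal_context(struct i915_gem_context *ctx)
    {
            ctx->file_priv = ERR_PTR(-EBADF);    /* no owning fd */
            i915_gem_context_set_closed(ctx);    /* not user accessible */
            i915_gem_context_clear_bannable(ctx);
            i915_gem_context_set_force_single_submission(ctx);

            /* ...yet it is still not the kernel context, which is
             * identified by a NULL file_priv:
             */
            GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
    }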
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 5e38299b5df6..d037adcda6f2 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -278,7 +278,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- obj = i915_gem_object_alloc(dev);
+ obj = i915_gem_object_alloc(to_i915(dev));
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d534a316a16e..c181b1bb3d2c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -51,7 +51,10 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
}
static bool
-mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
+mark_free(struct drm_mm_scan *scan,
+ struct i915_vma *vma,
+ unsigned int flags,
+ struct list_head *unwind)
{
if (i915_vma_is_pinned(vma))
return false;
@@ -63,7 +66,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
return false;
list_add(&vma->exec_list, unwind);
- return drm_mm_scan_add_block(&vma->node);
+ return drm_mm_scan_add_block(scan, &vma->node);
}
/**
@@ -96,7 +99,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
u64 start, u64 end,
unsigned flags)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
+ struct drm_mm_scan scan;
struct list_head eviction_list;
struct list_head *phases[] = {
&vm->inactive_list,
@@ -104,9 +108,11 @@ i915_gem_evict_something(struct i915_address_space *vm,
NULL,
}, **phase;
struct i915_vma *vma, *next;
+ struct drm_mm_node *node;
+ enum drm_mm_insert_mode mode;
int ret;
- lockdep_assert_held(&vm->dev->struct_mutex);
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -122,14 +128,23 @@ i915_gem_evict_something(struct i915_address_space *vm,
* On each list, the oldest objects lie at the HEAD with the freshest
* object on the TAIL.
*/
- if (start != 0 || end != vm->total) {
- drm_mm_init_scan_with_range(&vm->mm, min_size,
- alignment, cache_level,
- start, end);
- } else
- drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
-
- if (flags & PIN_NONBLOCK)
+ mode = DRM_MM_INSERT_BEST;
+ if (flags & PIN_HIGH)
+ mode = DRM_MM_INSERT_HIGH;
+ if (flags & PIN_MAPPABLE)
+ mode = DRM_MM_INSERT_LOW;
+ drm_mm_scan_init_with_range(&scan, &vm->mm,
+ min_size, alignment, cache_level,
+ start, end, mode);
+
+ /* Retire before we search the active list. Although we have
+ * reasonable accuracy in our retirement lists, we may have
+ * a stray pin (preventing eviction) that can only be resolved by
+ * retiring.
+ */
+ if (!(flags & PIN_NONBLOCK))
+ i915_gem_retire_requests(dev_priv);
+ else
phases[1] = NULL;
search_again:
@@ -137,13 +152,13 @@ search_again:
phase = phases;
do {
list_for_each_entry(vma, *phase, vm_link)
- if (mark_free(vma, flags, &eviction_list))
+ if (mark_free(&scan, vma, flags, &eviction_list))
goto found;
} while (*++phase);
/* Nothing found, clean up and bail out! */
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
- ret = drm_mm_scan_remove_block(&vma->node);
+ ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret);
INIT_LIST_HEAD(&vma->exec_list);
@@ -162,7 +177,7 @@ search_again:
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
*/
- return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
+ return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
}
/* Not everything in the GGTT is tracked via vma (otherwise we
@@ -192,7 +207,7 @@ found:
* of any of our objects, thus corrupting the list).
*/
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
- if (drm_mm_scan_remove_block(&vma->node))
+ if (drm_mm_scan_remove_block(&scan, &vma->node))
__i915_vma_pin(vma);
else
list_del_init(&vma->exec_list);
@@ -210,48 +225,119 @@ found:
if (ret == 0)
ret = i915_vma_unbind(vma);
}
+
+ while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
+ vma = container_of(node, struct i915_vma, node);
+ ret = i915_vma_unbind(vma);
+ }
+
return ret;
}
-int
-i915_gem_evict_for_vma(struct i915_vma *target)
+/**
+ * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
+ * @vm: address space to evict from
+ * @target: range (and color) to evict for
+ * @flags: additional flags to control the eviction algorithm
+ *
+ * This function will try to evict vmas that overlap the target node.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
+int i915_gem_evict_for_node(struct i915_address_space *vm,
+ struct drm_mm_node *target,
+ unsigned int flags)
{
- struct drm_mm_node *node, *next;
+ LIST_HEAD(eviction_list);
+ struct drm_mm_node *node;
+ u64 start = target->start;
+ u64 end = start + target->size;
+ struct i915_vma *vma, *next;
+ bool check_color;
+ int ret = 0;
- lockdep_assert_held(&target->vm->dev->struct_mutex);
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ trace_i915_gem_evict_node(vm, target, flags);
- list_for_each_entry_safe(node, next,
- &target->vm->mm.head_node.node_list,
- node_list) {
- struct i915_vma *vma;
- int ret;
+ /* Retire before we search the active list. Although we have
+ * reasonable accuracy in our retirement lists, we may have
+ * a stray pin (preventing eviction) that can only be resolved by
+ * retiring.
+ */
+ if (!(flags & PIN_NONBLOCK))
+ i915_gem_retire_requests(vm->i915);
+
+ check_color = vm->mm.color_adjust;
+ if (check_color) {
+ /* Expand search to cover neighbouring guard pages (or lack!) */
+ if (start > vm->start)
+ start -= I915_GTT_PAGE_SIZE;
+ if (end < vm->start + vm->total)
+ end += I915_GTT_PAGE_SIZE;
+ }
- if (node->start + node->size <= target->node.start)
- continue;
- if (node->start >= target->node.start + target->node.size)
+ drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
+ /* If we find any non-objects (!vma), we cannot evict them */
+ if (node->color == I915_COLOR_UNEVICTABLE) {
+ ret = -ENOSPC;
break;
+ }
vma = container_of(node, typeof(*vma), node);
- if (i915_vma_is_pinned(vma)) {
- if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
- /* Object is pinned for some other use */
- return -EBUSY;
+ /* If we are using coloring to insert guard pages between
+ * different cache domains within the address space, we have
+ * to check whether the objects on either side of our range
+ * abut and conflict. If they are in conflict, then we evict
+ * those as well to make room for our guard pages.
+ */
+ if (check_color) {
+ if (vma->node.start + vma->node.size == node->start) {
+ if (vma->node.color == node->color)
+ continue;
+ }
+ if (vma->node.start == node->start + node->size) {
+ if (vma->node.color == node->color)
+ continue;
+ }
+ }
- /* We need to evict a buffer in the same batch */
- if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
- /* Overlapping fixed objects in the same batch */
- return -EINVAL;
+ if (flags & PIN_NONBLOCK &&
+ (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
+ ret = -ENOSPC;
+ break;
+ }
- return -ENOSPC;
+ /* Overlap of objects in the same batch? */
+ if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
+ ret = -ENOSPC;
+ if (vma->exec_entry &&
+ vma->exec_entry->flags & EXEC_OBJECT_PINNED)
+ ret = -EINVAL;
+ break;
}
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
+ /* Never show fear in the face of dragons!
+ *
+ * We cannot directly remove this node from within this
+ * iterator, and, as with i915_gem_evict_something(), we employ
+ * the vma pin_count to prevent unbinding one vma from freeing
+ * another in our eviction list (by dropping its active
+ * reference).
+ */
+ __i915_vma_pin(vma);
+ list_add(&vma->exec_list, &eviction_list);
}
- return 0;
+ list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+ list_del_init(&vma->exec_list);
+ __i915_vma_unpin(vma);
+ if (ret == 0)
+ ret = i915_vma_unbind(vma);
+ }
+
+ return ret;
}
/**
@@ -273,11 +359,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
- lockdep_assert_held(&vm->dev->struct_mutex);
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict_vm(vm);
if (do_idle) {
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
if (i915_is_ggtt(vm)) {
ret = i915_gem_switch_to_kernel_context(dev_priv);
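The eviction rework follows the drm_mm changes in this pull: scan state now lives in a caller-owned struct drm_mm_scan rather than inside the drm_mm, so independent scans can run on different managers. The contract, sketched with only the calls used above (every added block must be removed from the scan before anything is evicted; error handling elided):

    struct drm_mm_scan scan;
    LIST_HEAD(unwind);
    struct i915_vma *vma, *next;
    struct drm_mm_node *node;

    drm_mm_scan_init_with_range(&scan, &vm->mm, min_size, alignment,
                                cache_level, start, end, DRM_MM_INSERT_BEST);

    /* Feed candidates until the scan reports a big-enough hole: */
    list_for_each_entry(vma, &vm->inactive_list, vm_link) {
            list_add(&vma->exec_list, &unwind);
            if (drm_mm_scan_add_block(&scan, &vma->node))
                    break;
    }

    /* Every candidate must be removed again, newest first (list_add
     * prepends, so forward iteration is reverse addition order); nodes
     * for which drm_mm_scan_remove_block() returns true form the
     * eviction set, everything else is dropped:
     */
    list_for_each_entry_safe(vma, next, &unwind, exec_list)
            if (!drm_mm_scan_remove_block(&scan, &vma->node))
                    list_del_init(&vma->exec_list);

    /* Only now is it safe to unbind the chosen vmas, plus any
     * neighbours the scan evicts to satisfy colouring:
     */
    list_for_each_entry_safe(vma, next, &unwind, exec_list) {
            list_del_init(&vma->exec_list);
            i915_vma_unbind(vma);
    }
    while ((node = drm_mm_scan_color_evict(&scan)))
            i915_vma_unbind(container_of(node, struct i915_vma, node));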
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b8b877c91b0a..d02cfaefe1c8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -184,7 +184,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (unlikely(IS_ERR(vma))) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
@@ -274,6 +274,7 @@ static void eb_destroy(struct eb_vmas *eb)
exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
+ vma->exec_entry = NULL;
i915_vma_put(vma);
}
kfree(eb);
@@ -435,12 +436,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
PIN_MAPPABLE | PIN_NONBLOCK);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
- ret = drm_mm_insert_node_in_range_generic
+ ret = drm_mm_insert_node_in_range
(&ggtt->base.mm, &cache->node,
- 4096, 0, 0,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
+ DRM_MM_INSERT_LOW);
if (ret) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
@@ -850,8 +850,7 @@ eb_vma_misplaced(struct i915_vma *vma)
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_vma_is_ggtt(vma));
- if (entry->alignment &&
- vma->node.start & (entry->alignment - 1))
+ if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
return true;
if (vma->node.size < entry->pad_to_size)
@@ -1232,14 +1231,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *engine, const u32 ctx_id)
{
struct i915_gem_context *ctx;
- struct i915_ctx_hang_stats *hs;
ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
- hs = &ctx->hang_stats;
- if (hs->banned) {
+ if (i915_gem_context_is_banned(ctx)) {
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
return ERR_PTR(-EIO);
}
@@ -1260,6 +1257,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
const unsigned int idx = req->engine->id;
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/* Add a reference if we're newly entering the active list.
@@ -1715,7 +1713,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
params->args_batch_start_offset = args->batch_start_offset;
- if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
+ if (engine->needs_cmd_parser && args->batch_len) {
struct i915_vma *vma;
vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 0efa3571afc3..fadbe8f4c745 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -77,16 +77,17 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
val = 0;
if (vma) {
- unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
- bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
- u32 row_size = stride * (is_y_tiled ? 32 : 8);
- u32 size = rounddown((u32)vma->node.size, row_size);
- val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
- val |= vma->node.start & 0xfffff000;
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
+ GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
+ GEM_BUG_ON(!IS_ALIGNED(stride, 128));
+
+ val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
+ val |= vma->node.start;
val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
- if (is_y_tiled)
+ if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
val |= BIT(I965_FENCE_TILING_Y_SHIFT);
val |= I965_FENCE_REG_VALID;
}
@@ -122,31 +123,24 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
- int pitch_val;
- int tile_width;
- WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
- !is_power_of_2(vma->node.size) ||
- (vma->node.start & (vma->node.size - 1)),
- "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
- vma->node.start,
- i915_vma_is_map_and_fenceable(vma),
- vma->node.size);
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
+ GEM_BUG_ON(!is_power_of_2(vma->fence_size));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
- tile_width = 128;
+ stride /= 128;
else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
+ stride /= 512;
+ GEM_BUG_ON(!is_power_of_2(stride));
val = vma->node.start;
if (is_y_tiled)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I915_FENCE_SIZE_BITS(vma->node.size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(vma->fence_size);
+ val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
+
val |= I830_FENCE_REG_VALID;
}
@@ -166,25 +160,19 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
val = 0;
if (vma) {
- unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
- bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
- u32 pitch_val;
-
- WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
- !is_power_of_2(vma->node.size) ||
- (vma->node.start & (vma->node.size - 1)),
- "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
- vma->node.start, vma->node.size);
- pitch_val = stride / 128;
- pitch_val = ffs(pitch_val) - 1;
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
+ GEM_BUG_ON(!is_power_of_2(vma->fence_size));
+ GEM_BUG_ON(!is_power_of_2(stride / 128));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
val = vma->node.start;
- if (is_y_tiled)
+ if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I830_FENCE_SIZE_BITS(vma->node.size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(vma->fence_size);
+ val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}
@@ -290,7 +278,7 @@ i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+ assert_rpm_wakelock_held(vma->vm->i915);
if (!fence)
return 0;
@@ -313,7 +301,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(&dev_priv->drm))
+ if (intel_has_pending_fb_unpin(dev_priv))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
@@ -346,7 +334,7 @@ i915_vma_get_fence(struct i915_vma *vma)
/* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+ assert_rpm_wakelock_held(vma->vm->i915);
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
@@ -357,7 +345,7 @@ i915_vma_get_fence(struct i915_vma *vma)
return 0;
}
} else if (set) {
- fence = fence_find(to_i915(vma->vm->dev));
+ fence = fence_find(vma->vm->i915);
if (IS_ERR(fence))
return PTR_ERR(fence);
} else
@@ -367,6 +355,30 @@ i915_vma_get_fence(struct i915_vma *vma)
}
/**
+ * i915_gem_revoke_fences - revoke fence state
+ * @dev_priv: i915 device private
+ *
+ * Removes all GTT mmappings via the fence registers. This forces any user
+ * of the fence to reacquire that fence before continuing with their access.
+ * One use is during GPU reset where the fence register is lost and we need to
+ * revoke concurrent userspace access via GTT mmaps until the hardware has been
+ * reset and the fence registers have been restored.
+ */
+void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+
+ if (fence->vma)
+ i915_gem_release_mmap(fence->vma->obj);
+ }
+}
+
+/**
* i915_gem_restore_fences - restore fence state
* @dev_priv: i915 device private
*
@@ -512,8 +524,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
- !IS_G33(dev_priv))) {
+ } else if (IS_MOBILE(dev_priv) ||
+ IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 22c4a2d01adf..99a31ded4dfd 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -30,6 +30,8 @@
struct drm_i915_private;
struct i915_vma;
+#define I965_FENCE_PAGE 4096UL
+
struct drm_i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b4bde1452f2a..2801a4d56324 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -23,10 +23,14 @@
*
*/
+#include <linux/log2.h>
+#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
+
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
@@ -99,12 +103,29 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
-const struct i915_ggtt_view i915_ggtt_view_normal = {
- .type = I915_GGTT_VIEW_NORMAL,
-};
-const struct i915_ggtt_view i915_ggtt_view_rotated = {
- .type = I915_GGTT_VIEW_ROTATED,
-};
+static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+ /* Note that as an uncached mmio write, this should flush the
+ * write-combining buffer (WCB) of pending GGTT writes before it
+ * triggers the invalidate.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+}
+
+static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+ gen6_ggtt_invalidate(dev_priv);
+ I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+ intel_gtt_chipset_flush();
+}
+
+static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
+{
+ i915->ggtt.invalidate(i915);
+}
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
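Wrapping the flush behind an invalidate vfunc lets the GGTT pick the right mechanism once at init rather than branching on every call. How the pointer is expected to be wired up (the actual assignments live in the GGTT probe and GuC setup code elsewhere in this series, so treat this as an assumed sketch):

    /* Assumed selection logic; the real code sets this during ggtt
     * init and later flips it to guc_ggtt_invalidate when GuC
     * submission is enabled.
     */
    if (INTEL_GEN(dev_priv) >= 6)
            dev_priv->ggtt.invalidate = gen6_ggtt_invalidate;
    else
            dev_priv->ggtt.invalidate = gmch_ggtt_invalidate;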
@@ -113,10 +134,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
- has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
- has_full_48bit_ppgtt =
- IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
+ has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
+ has_full_ppgtt = dev_priv->info.has_full_ppgtt;
+ has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
if (intel_vgpu_active(dev_priv)) {
/* emulation is too hard */
@@ -330,7 +350,7 @@ static int __setup_page_dma(struct drm_i915_private *dev_priv,
return -ENOMEM;
p->daddr = dma_map_page(kdev,
- p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, p->daddr)) {
__free_page(p->page);
@@ -354,7 +374,7 @@ static void cleanup_page_dma(struct drm_i915_private *dev_priv,
if (WARN_ON(!p->page))
return;
- dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(p->page);
memset(p, 0, sizeof(*p));
}
@@ -372,7 +392,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
*/
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
@@ -380,7 +400,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) \
- kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
+ kunmap_page_dma((ppgtt)->base.i915, (vaddr))
#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
@@ -470,7 +490,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
- fill_px(to_i915(vm->dev), pt, scratch_pte);
+ fill_px(vm->i915, pt, scratch_pte);
}
static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -483,7 +503,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, 0);
- fill32_px(to_i915(vm->dev), pt, scratch_pte);
+ fill32_px(vm->i915, pt, scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
@@ -531,7 +551,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
- fill_px(to_i915(vm->dev), pd, scratch_pde);
+ fill_px(vm->i915, pd, scratch_pde);
}
static int __pdp_init(struct drm_i915_private *dev_priv,
@@ -612,7 +632,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
+ fill_px(vm->i915, pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
@@ -623,14 +643,14 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
- fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
+ fill_px(vm->i915, pml4, scratch_pml4e);
}
static void
-gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
- struct i915_page_directory *pd,
- int index)
+gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
+ struct i915_page_directory_pointer *pdp,
+ struct i915_page_directory *pd,
+ int index)
{
gen8_ppgtt_pdpe_t *page_directorypo;
@@ -643,10 +663,10 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
}
static void
-gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
- struct i915_pml4 *pml4,
- struct i915_page_directory_pointer *pdp,
- int index)
+gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
+ struct i915_pml4 *pml4,
+ struct i915_page_directory_pointer *pdp,
+ int index)
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
@@ -710,7 +730,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -735,10 +755,9 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
GEM_BUG_ON(pte_end > GEN8_PTES);
bitmap_clear(pt->used_ptes, pte, num_entries);
-
- if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
- free_pt(to_i915(vm->dev), pt);
- return true;
+ if (USES_FULL_PPGTT(vm->i915)) {
+ if (bitmap_empty(pt->used_ptes, GEN8_PTES))
+ return true;
}
pt_vaddr = kmap_px(pt);
@@ -775,13 +794,12 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
pde_vaddr = kmap_px(pd);
pde_vaddr[pde] = scratch_pde;
kunmap_px(ppgtt, pde_vaddr);
+ free_pt(vm->i915, pt);
}
}
- if (bitmap_empty(pd->used_pdes, I915_PDES)) {
- free_pd(to_i915(vm->dev), pd);
+ if (bitmap_empty(pd->used_pdes, I915_PDES))
return true;
- }
return false;
}
@@ -795,12 +813,8 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint64_t pdpe;
- gen8_ppgtt_pdpe_t *pdpe_vaddr;
- gen8_ppgtt_pdpe_t scratch_pdpe =
- gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (WARN_ON(!pdp->page_directory[pdpe]))
@@ -808,21 +822,15 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
__clear_bit(pdpe, pdp->used_pdpes);
- if (USES_FULL_48BIT_PPGTT(dev_priv)) {
- pdpe_vaddr = kmap_px(pdp);
- pdpe_vaddr[pdpe] = scratch_pdpe;
- kunmap_px(ppgtt, pdpe_vaddr);
- }
+ gen8_setup_pdpe(ppgtt, pdp, vm->scratch_pd, pdpe);
+ free_pd(vm->i915, pd);
}
}
mark_tlbs_dirty(ppgtt);
- if (USES_FULL_48BIT_PPGTT(dev_priv) &&
- bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) {
- free_pdp(dev_priv, pdp);
+ if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)))
return true;
- }
return false;
}
@@ -839,11 +847,8 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory_pointer *pdp;
uint64_t pml4e;
- gen8_ppgtt_pml4e_t *pml4e_vaddr;
- gen8_ppgtt_pml4e_t scratch_pml4e =
- gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
- GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
+ GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (WARN_ON(!pml4->pdps[pml4e]))
@@ -851,9 +856,8 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
__clear_bit(pml4e, pml4->used_pml4es);
- pml4e_vaddr = kmap_px(pml4);
- pml4e_vaddr[pml4e] = scratch_pml4e;
- kunmap_px(ppgtt, pml4e_vaddr);
+ gen8_setup_pml4e(ppgtt, pml4, vm->scratch_pdp, pml4e);
+ free_pdp(vm->i915, pdp);
}
}
}
@@ -863,7 +867,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
+ if (USES_FULL_48BIT_PPGTT(vm->i915))
gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
else
gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
@@ -898,7 +902,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev)))
+ if (++pdpe == I915_PDPES_PER_PDP(vm->i915))
break;
pde = 0;
}
@@ -921,7 +925,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
- if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
+ if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
@@ -955,7 +959,7 @@ static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
static int gen8_init_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
int ret;
ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
@@ -1002,7 +1006,7 @@ free_scratch_page:
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
enum vgt_g2v_type msg;
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
int i;
if (USES_FULL_48BIT_PPGTT(dev_priv)) {
@@ -1032,7 +1036,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
if (USES_FULL_48BIT_PPGTT(dev_priv))
free_pdp(dev_priv, vm->scratch_pdp);
@@ -1059,7 +1063,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
int i;
for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
@@ -1074,7 +1078,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (intel_vgpu_active(dev_priv))
@@ -1112,7 +1116,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pts)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_table *pt;
uint32_t pde;
@@ -1173,7 +1177,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pds)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory *pd;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
@@ -1226,7 +1230,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pdps)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;
@@ -1301,7 +1305,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
@@ -1309,15 +1313,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
int ret;
- /* Wrap is never okay since we can only represent 48b, and we don't
- * actually use the other side of the canonical address space.
- */
- if (WARN_ON(start + length < start))
- return -ENODEV;
-
- if (WARN_ON(start + length > vm->total))
- return -ENODEV;
-
ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
if (ret)
return ret;
@@ -1381,7 +1376,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
kunmap_px(ppgtt, page_directory);
__set_bit(pdpe, pdp->used_pdpes);
- gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
+ gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
}
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
@@ -1440,7 +1435,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
if (ret)
goto err_out;
- gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
+ gen8_setup_pml4e(ppgtt, pml4, pdp, pml4e);
}
bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
@@ -1450,7 +1445,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
+ gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);
return ret;
}
@@ -1460,7 +1455,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
+ if (USES_FULL_48BIT_PPGTT(vm->i915))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
@@ -1531,7 +1526,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
+ if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t pml4e;
@@ -1584,7 +1579,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
int ret;
ret = gen8_init_scratch(&ppgtt->base);
@@ -1927,7 +1922,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
@@ -1935,9 +1930,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint32_t pde;
int ret;
- if (WARN_ON(start_in + length_in > ppgtt->base.total))
- return -ENODEV;
-
start = start_save = start_in;
length = length_save = length_in;
@@ -2014,7 +2006,7 @@ unwind_out:
static int gen6_init_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
int ret;
ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
@@ -2034,7 +2026,7 @@ static int gen6_init_scratch(struct i915_address_space *vm)
static void gen6_free_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
free_pt(dev_priv, vm->scratch_pt);
cleanup_scratch_page(dev_priv, &vm->scratch_page);
@@ -2044,7 +2036,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd = &ppgtt->pd;
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_table *pt;
uint32_t pde;
@@ -2060,9 +2052,8 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool retried = false;
int ret;
/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
@@ -2075,29 +2066,14 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
-alloc:
- ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
- &ppgtt->node, GEN6_PD_SIZE,
- GEN6_PD_ALIGN, 0,
- 0, ggtt->base.total,
- DRM_MM_TOPDOWN);
- if (ret == -ENOSPC && !retried) {
- ret = i915_gem_evict_something(&ggtt->base,
- GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_CACHE_NONE,
- 0, ggtt->base.total,
- 0);
- if (ret)
- goto err_out;
-
- retried = true;
- goto alloc;
- }
-
+ ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
+ GEN6_PD_SIZE, GEN6_PD_ALIGN,
+ I915_COLOR_UNEVICTABLE,
+ 0, ggtt->base.total,
+ PIN_HIGH);
if (ret)
goto err_out;
-
if (ppgtt->node.start < ggtt->mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
@@ -2125,7 +2101,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
@@ -2176,7 +2152,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv)
{
- ppgtt->base.dev = &dev_priv->drm;
+ ppgtt->base.i915 = dev_priv;
if (INTEL_INFO(dev_priv)->gen < 8)
return gen6_ppgtt_init(ppgtt);
@@ -2285,6 +2261,27 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
return ppgtt;
}
+void i915_ppgtt_close(struct i915_address_space *vm)
+{
+ struct list_head *phases[] = {
+ &vm->active_list,
+ &vm->inactive_list,
+ &vm->unbound_list,
+ NULL,
+ }, **phase;
+
+ GEM_BUG_ON(vm->closed);
+ vm->closed = true;
+
+ for (phase = phases; *phase; phase++) {
+ struct i915_vma *vma, *vn;
+
+ list_for_each_entry_safe(vma, vn, *phase, vm_link)
+ if (!i915_vma_is_closed(vma))
+ i915_vma_close(vma);
+ }
+}
+
void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
@@ -2349,16 +2346,6 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
-static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
-{
- if (INTEL_INFO(dev_priv)->gen < 6) {
- intel_gtt_chipset_flush();
- } else {
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
- }
-}
-
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -2373,16 +2360,30 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
- i915_ggtt_flush(dev_priv);
+ i915_ggtt_invalidate(dev_priv);
}
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- if (dma_map_sg(&obj->base.dev->pdev->dev,
- pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return 0;
+ do {
+ if (dma_map_sg(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return 0;
+
+ /* If the DMA remap fails, one cause can be that we have
+ * too many objects pinned in a small remapping table,
+ * such as swiotlb. Incrementally purge all other objects and
+ * try again - if there are no more pages to remove from
+ * the DMA remapper, i915_gem_shrink will return 0.
+ */
+ GEM_BUG_ON(obj->mm.pages == pages);
+ } while (i915_gem_shrink(to_i915(obj->base.dev),
+ obj->base.size >> PAGE_SHIFT,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE));
return -ENOSPC;
}
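The loop above leans on the shrinker: if dma_map_sg() fails (for instance because a small swiotlb remapping table is exhausted), it purges other objects and retries until i915_gem_shrink() reports nothing left to reclaim. The shape of that retry, as a standalone sketch with toy stand-ins (map_pages() and shrink_objects() are hypothetical, not i915 or DMA API calls):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model: the "remap table" has three slots, all taken;
 * each shrink pass releases one slot.
 */
static int slots_in_use = 3;

static bool map_pages(void)
{
	return slots_in_use < 3;	/* mapping succeeds once a slot is free */
}

static unsigned int shrink_objects(void)
{
	if (slots_in_use == 0)
		return 0;		/* nothing left to purge */
	slots_in_use--;
	return 1;			/* freed one object's mapping */
}

static int prepare_pages(void)
{
	do {
		if (map_pages())
			return 0;
		/* Mapping failed: purge other objects and retry until the
		 * shrinker reports nothing left to reclaim.
		 */
	} while (shrink_objects());

	return -ENOSPC;
}

int main(void)
{
	printf("prepare_pages: %d\n", prepare_pages());	/* 0 after one purge */
	return 0;
}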
@@ -2398,15 +2399,13 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level level,
u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
- (offset >> PAGE_SHIFT);
+ (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
gen8_set_pte(pte, gen8_pte_encode(addr, level));
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ ggtt->invalidate(vm->i915);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2414,7 +2413,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
@@ -2443,8 +2441,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ ggtt->invalidate(vm->i915);
}
struct insert_entries {
@@ -2479,15 +2476,13 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level level,
u32 flags)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
- (offset >> PAGE_SHIFT);
+ (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
iowrite32(vm->pte_encode(addr, level, flags), pte);
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ ggtt->invalidate(vm->i915);
}
/*
@@ -2501,7 +2496,6 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 flags)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen6_pte_t __iomem *gtt_entries;
@@ -2529,8 +2523,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ ggtt->invalidate(vm->i915);
}
static void nop_clear_range(struct i915_address_space *vm,
@@ -2621,7 +2614,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
@@ -2653,7 +2646,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
u32 pte_flags;
int ret;
@@ -2687,7 +2680,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
@@ -2721,19 +2714,17 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
-static void i915_gtt_color_adjust(struct drm_mm_node *node,
+static void i915_gtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
u64 *end)
{
if (node->color != color)
- *start += 4096;
+ *start += I915_GTT_PAGE_SIZE;
- node = list_first_entry_or_null(&node->node_list,
- struct drm_mm_node,
- node_list);
- if (node && node->allocated && node->color != color)
- *end -= 4096;
+ node = list_next_entry(node, node_list);
+ if (node->allocated && node->color != color)
+ *end -= I915_GTT_PAGE_SIZE;
}
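For illustration, the guard-page rule enforced by i915_gtt_color_adjust() (shrink a hole by one GTT page on any side whose neighbour has a different cache color) reduces to a small standalone sketch; the node type and helper below are toys, not the drm_mm API:

#include <stdio.h>

#define GUARD_PAGE 4096ULL	/* one GTT page kept between differing colors */

struct toy_node {
	unsigned long color;
	unsigned long long start, end;
};

/* Shrink the hole [*start, *end) so a guard page separates it from any
 * neighbour whose cache "color" differs; the same idea as
 * i915_gtt_color_adjust() above, minus the drm_mm list walking.
 */
static void color_adjust(const struct toy_node *prev,
			 const struct toy_node *next,
			 unsigned long color,
			 unsigned long long *start, unsigned long long *end)
{
	if (prev && prev->color != color)
		*start += GUARD_PAGE;
	if (next && next->color != color)
		*end -= GUARD_PAGE;
}

int main(void)
{
	struct toy_node prev = { .color = 0, .start = 0x00000, .end = 0x10000 };
	struct toy_node next = { .color = 1, .start = 0x40000, .end = 0x50000 };
	unsigned long long start = prev.end, end = next.start;

	color_adjust(&prev, &next, 0, &start, &end);
	printf("usable hole: [%#llx, %#llx)\n", start, end);	/* end -= 4096 */
	return 0;
}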
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2758,11 +2749,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
- &ggtt->error_capture,
- 4096, 0, -1,
- 0, ggtt->mappable_end,
- 0, 0);
+ ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
if (ret)
return ret;
@@ -2929,8 +2919,8 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
- struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
- struct pci_dev *pdev = ggtt->base.dev->pdev;
+ struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
phys_addr_t phys_addr;
int ret;
@@ -2944,7 +2934,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -3042,12 +3032,12 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
+ cleanup_scratch_page(vm->i915, &vm->scratch_page);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+ struct drm_i915_private *dev_priv = ggtt->base.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3074,7 +3064,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -3091,12 +3081,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
if (IS_CHERRYVIEW(dev_priv))
ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
return ggtt_probe_common(ggtt, size);
}
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+ struct drm_i915_private *dev_priv = ggtt->base.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3128,6 +3120,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
if (HAS_EDRAM(dev_priv))
ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev_priv))
@@ -3149,7 +3143,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+ struct drm_i915_private *dev_priv = ggtt->base.i915;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3158,8 +3152,10 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return -EIO;
}
- intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
- &ggtt->mappable_base, &ggtt->mappable_end);
+ intel_gtt_get(&ggtt->base.total,
+ &ggtt->stolen_size,
+ &ggtt->mappable_base,
+ &ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
ggtt->base.insert_page = i915_ggtt_insert_page;
@@ -3169,6 +3165,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.cleanup = i915_gmch_remove;
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3184,7 +3182,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ggtt->base.dev = &dev_priv->drm;
+ ggtt->base.i915 = dev_priv;
if (INTEL_GEN(dev_priv) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -3195,6 +3193,16 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
+ * This is easier than doing range restriction on the fly, as we
+ * currently don't have any bits spare to pass in this upper
+ * restriction!
+ */
+ if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
+ ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
+ ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ }
+
if ((ggtt->base.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
@@ -3214,7 +3222,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
DRM_INFO("Memory usable by graphics device = %lluM\n",
ggtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
@@ -3277,6 +3285,16 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
+void i915_ggtt_enable_guc(struct drm_i915_private *i915)
+{
+ i915->ggtt.invalidate = guc_ggtt_invalidate;
+}
+
+void i915_ggtt_disable_guc(struct drm_i915_private *i915)
+{
+ i915->ggtt.invalidate = gen6_ggtt_invalidate;
+}
+
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -3314,7 +3332,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -3340,52 +3358,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
}
}
- i915_ggtt_flush(dev_priv);
-}
-
-struct i915_vma *
-i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct rb_node *rb;
-
- rb = obj->vma_tree.rb_node;
- while (rb) {
- struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
- long cmp;
-
- cmp = i915_vma_compare(vma, vm, view);
- if (cmp == 0)
- return vma;
-
- if (cmp < 0)
- rb = rb->rb_right;
- else
- rb = rb->rb_left;
- }
-
- return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
-
- vma = i915_gem_obj_to_vma(obj, vm, view);
- if (!vma) {
- vma = i915_vma_create(obj, vm, view);
- GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
- }
-
- GEM_BUG_ON(i915_vma_is_closed(vma));
- return vma;
+ i915_ggtt_invalidate(dev_priv);
}
static struct scatterlist *
@@ -3485,7 +3458,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
{
struct sg_table *st;
struct scatterlist *sg, *iter;
- unsigned int count = view->params.partial.size;
+ unsigned int count = view->partial.size;
unsigned int offset;
int ret = -ENOMEM;
@@ -3497,9 +3470,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret)
goto err_sg_alloc;
- iter = i915_gem_object_get_sg(obj,
- view->params.partial.offset,
- &offset);
+ iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
GEM_BUG_ON(!iter);
sg = st->sgl;
@@ -3551,7 +3522,8 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
- intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
+ intel_rotate_fb_obj_pages(&vma->ggtt_view.rotated,
+ vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
else
@@ -3572,3 +3544,202 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return ret;
}
+/**
+ * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
+ * @vm: the &struct i915_address_space
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
+ * @size: how much space to allocate inside the GTT,
+ * must be #I915_GTT_PAGE_SIZE aligned
+ * @offset: where to insert inside the GTT,
+ * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
+ * (@offset + @size) must fit within the address space
+ * @color: color to apply to node, if this node is not from a VMA,
+ * color must be #I915_COLOR_UNEVICTABLE
+ * @flags: control search and eviction behaviour
+ *
+ * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
+ * the address space (using @size and @color). If the @node does not fit, it
+ * tries to evict any overlapping nodes from the GTT, including any
+ * neighbouring nodes if the colors do not match (to ensure guard pages between
+ * differing domains). See i915_gem_evict_for_node() for the gory details
+ * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
+ * evicting active overlapping objects, and any overlapping node that is pinned
+ * or marked as unevictable will also result in failure.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ u64 size, u64 offset, unsigned long color,
+ unsigned int flags)
+{
+ int err;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
+ GEM_BUG_ON(range_overflows(offset, size, vm->total));
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(drm_mm_node_allocated(node));
+
+ node->size = size;
+ node->start = offset;
+ node->color = color;
+
+ err = drm_mm_reserve_node(&vm->mm, node);
+ if (err != -ENOSPC)
+ return err;
+
+ err = i915_gem_evict_for_node(vm, node, flags);
+ if (err == 0)
+ err = drm_mm_reserve_node(&vm->mm, node);
+
+ return err;
+}
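The control flow above is a try/evict/retry pattern: attempt the exact placement, and only if that returns -ENOSPC evict the overlapping nodes and try once more. A minimal standalone sketch of the pattern, with hypothetical stand-ins for drm_mm_reserve_node() and i915_gem_evict_for_node():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins; the target range starts out occupied. */
static bool range_busy = true;

static int reserve_exact(void)
{
	return range_busy ? -ENOSPC : 0;
}

static int evict_overlapping(void)
{
	range_busy = false;	/* pretend the conflicting nodes were unbound */
	return 0;
}

static int gtt_reserve(void)
{
	int err = reserve_exact();
	if (err != -ENOSPC)	/* success, or a failure eviction can't fix */
		return err;

	err = evict_overlapping();
	if (err == 0)
		err = reserve_exact();
	return err;
}

int main(void)
{
	printf("gtt_reserve: %d\n", gtt_reserve());	/* 0 after eviction */
	return 0;
}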
+
+static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
+{
+ u64 range, addr;
+
+ GEM_BUG_ON(range_overflows(start, len, end));
+ GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+ range = round_down(end - len, align) - round_up(start, align);
+ if (range) {
+ if (sizeof(unsigned long) == sizeof(u64)) {
+ addr = get_random_long();
+ } else {
+ addr = get_random_int();
+ if (range > U32_MAX) {
+ addr <<= 32;
+ addr |= get_random_int();
+ }
+ }
+ div64_u64_rem(addr, range, &addr);
+ start += addr;
+ }
+
+ return round_up(start, align);
+}
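random_offset() picks a pseudo-random address such that the whole [addr, addr + len) interval stays inside [start, end) at the requested power-of-two alignment. The same arithmetic in a userspace sketch, with rand() standing in for get_random_long(), so the distribution is merely illustrative:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long u64;

/* align must be a power of two, as in the kernel's round_up()/round_down() */
static u64 round_up64(u64 x, u64 a)   { return (x + a - 1) & ~(a - 1); }
static u64 round_down64(u64 x, u64 a) { return x & ~(a - 1); }

static u64 pick_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range;

	assert(round_up64(start, align) <= round_down64(end - len, align));

	range = round_down64(end - len, align) - round_up64(start, align);
	if (range)
		start += (u64)rand() % range;

	return round_up64(start, align);
}

int main(void)
{
	/* a 64KiB object somewhere in a [1MiB, 2MiB) window, 4KiB aligned */
	printf("placed at %#llx\n", pick_offset(1ULL << 20, 2ULL << 20,
						64ULL << 10, 4096));
	return 0;
}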
+
+/**
+ * i915_gem_gtt_insert - insert a node into an address_space (GTT)
+ * @vm: the &struct i915_address_space
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
+ * @size: how much space to allocate inside the GTT,
+ * must be #I915_GTT_PAGE_SIZE aligned
+ * @alignment: required alignment of starting offset, may be 0 but
+ * if specified, this must be a power-of-two and at least
+ * #I915_GTT_MIN_ALIGNMENT
+ * @color: color to apply to node
+ * @start: start of any range restriction inside GTT (0 for all),
+ * must be #I915_GTT_PAGE_SIZE aligned
+ * @end: end of any range restriction inside GTT (U64_MAX for all),
+ * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
+ * @flags: control search and eviction behaviour
+ *
+ * i915_gem_gtt_insert() first searches for an available hole into which
+ * it can insert the node. The hole address is aligned to @alignment and
+ * its @size must then fit entirely within the [@start, @end] bounds. The
+ * nodes on either side of the hole must match @color, or else a guard page
+ * will be inserted between the two nodes (or the node evicted). If no
+ * suitable hole is found, first a victim is randomly selected and tested
+ * for eviction; if that fails, the LRU list of objects within the GTT
+ * is scanned to find the first set of replacement nodes to create the hole.
+ * Those old overlapping nodes are evicted from the GTT (and so must be
+ * rebound before any future use). Any node that is currently pinned cannot
+ * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
+ * active and #PIN_NONBLOCK is specified, that node is also skipped when
+ * searching for an eviction candidate. See i915_gem_evict_something() for
+ * the gory details on the eviction algorithm.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_insert(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ u64 size, u64 alignment, unsigned long color,
+ u64 start, u64 end, unsigned int flags)
+{
+ enum drm_mm_insert_mode mode;
+ u64 offset;
+ int err;
+
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+ GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+ GEM_BUG_ON(start >= end);
+ GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(drm_mm_node_allocated(node));
+
+ if (unlikely(range_overflows(start, size, end)))
+ return -ENOSPC;
+
+ if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
+ return -ENOSPC;
+
+ mode = DRM_MM_INSERT_BEST;
+ if (flags & PIN_HIGH)
+ mode = DRM_MM_INSERT_HIGH;
+ if (flags & PIN_MAPPABLE)
+ mode = DRM_MM_INSERT_LOW;
+
+ /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+ * so we know that we always have a minimum alignment of 4096.
+ * The drm_mm range manager is optimised to return results
+ * with zero alignment, so where possible use the optimal
+ * path.
+ */
+ BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
+ if (alignment <= I915_GTT_MIN_ALIGNMENT)
+ alignment = 0;
+
+ err = drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end, mode);
+ if (err != -ENOSPC)
+ return err;
+
+ /* No free space, pick a slot at random.
+ *
+ * There is a pathological case here using a GTT shared between
+ * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
+ *
+ * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
+ * (64k objects) (448k objects)
+ *
+ * Now imagine that the eviction LRU is ordered top-down (just because
+ * pathology meets real life), and that we need to evict an object to
+ * make room inside the aperture. The eviction scan then has to walk
+ * the 448k list before it finds one within range. And now imagine that
+ * it has to search for a new hole between every byte inside the memcpy,
+ * for several simultaneous clients.
+ *
+ * On a full-ppgtt system, if we have run out of available space, there
+ * will be lots and lots of objects in the eviction list! Again,
+ * searching that LRU list may be slow if we are also applying any
+ * range restrictions (e.g. restriction to low 4GiB) and so, for
+ * simplicity and similarity between the different GTTs, try the single
+ * random replacement first.
+ */
+ offset = random_offset(start, end,
+ size, alignment ?: I915_GTT_MIN_ALIGNMENT);
+ err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
+ if (err != -ENOSPC)
+ return err;
+
+ /* Randomly selected placement is pinned, do a search */
+ err = i915_gem_evict_something(vm, size, alignment, color,
+ start, end, flags);
+ if (err)
+ return err;
+
+ return drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end, DRM_MM_INSERT_EVICT);
+}
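In outline, the insert path degrades through three strategies: a search of the free space, a single randomly chosen victim, and only then the full LRU eviction scan. A toy sketch of that ladder (all four helpers are hypothetical stand-ins, not i915 calls):

#include <errno.h>
#include <stdio.h>

static int search_free_hole(void)  { return -ENOSPC; }	/* GTT is full */
static int evict_random_slot(void) { return -ENOSPC; }	/* victim was pinned */
static int evict_lru_scan(void)    { return 0; }	/* found victims */
static int insert_into_hole(void)  { return 0; }

static int gtt_insert(void)
{
	int err = search_free_hole();
	if (err != -ENOSPC)
		return err;

	/* Cheap second chance: one random victim instead of a full LRU walk. */
	err = evict_random_slot();
	if (err != -ENOSPC)
		return err;

	err = evict_lru_scan();
	if (err)
		return err;

	return insert_into_hole();
}

int main(void)
{
	printf("gtt_insert: %d\n", gtt_insert());
	return 0;
}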
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4f35be4c26c7..3c5ef5358cef 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -40,6 +40,9 @@
#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
+#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
@@ -142,34 +145,57 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
struct sg_table;
-enum i915_ggtt_view_type {
- I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED,
- I915_GGTT_VIEW_PARTIAL,
-};
-
struct intel_rotation_info {
- struct {
+ struct intel_rotation_plane_info {
/* tiles */
unsigned int width, height, stride, offset;
} plane[2];
+} __packed;
+
+static inline void assert_intel_rotation_info_is_packed(void)
+{
+ BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+}
+
+struct intel_partial_info {
+ u64 offset;
+ unsigned int size;
+} __packed;
+
+static inline void assert_intel_partial_info_is_packed(void)
+{
+ BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+}
+
+enum i915_ggtt_view_type {
+ I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
};
+static inline void assert_i915_ggtt_view_type_is_unique(void)
+{
+ /* As we encode the size of each branch inside the union into its type,
+ * we have to be careful that each branch has a unique size.
+ */
+ switch ((enum i915_ggtt_view_type)0) {
+ case I915_GGTT_VIEW_NORMAL:
+ case I915_GGTT_VIEW_PARTIAL:
+ case I915_GGTT_VIEW_ROTATED:
+ /* gcc complains if these are identical cases */
+ break;
+ }
+}
+
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
-
union {
- struct {
- u64 offset;
- unsigned int size;
- } partial;
+ /* Members need to contain no holes/padding */
+ struct intel_partial_info partial;
struct intel_rotation_info rotated;
- } params;
+ };
};
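The enum values above encode each union member's size into its type tag, which lets view comparison be a single memcmp over type bytes (and is why the members must contain no padding). A standalone sketch of the idiom, with toy view types rather than the i915 definitions:

#include <stdio.h>
#include <string.h>

struct partial { unsigned long long offset; unsigned int size; } __attribute__((packed));
struct rotated { unsigned int plane[8]; } __attribute__((packed));

enum view_type {
	VIEW_NORMAL  = 0,			/* no payload */
	VIEW_PARTIAL = sizeof(struct partial),	/* 12 bytes */
	VIEW_ROTATED = sizeof(struct rotated),	/* 32 bytes: sizes must differ! */
};

struct view {
	enum view_type type;
	union {		/* packed members: no padding to confuse memcmp */
		struct partial partial;
		struct rotated rotated;
	};
};

/* The type value doubles as the number of payload bytes to compare. */
static int view_equal(const struct view *a, const struct view *b)
{
	return a->type == b->type &&
	       memcmp(&a->partial, &b->partial, a->type) == 0;
}

int main(void)
{
	struct view a = { .type = VIEW_PARTIAL, .partial = { 4096, 1 } };
	struct view b = a;

	printf("equal: %d\n", view_equal(&a, &b));	/* 1 */
	return 0;
}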
-extern const struct i915_ggtt_view i915_ggtt_view_normal;
-extern const struct i915_ggtt_view i915_ggtt_view_rotated;
-
enum i915_cache_level;
struct i915_vma;
@@ -220,7 +246,7 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
struct i915_gem_timeline timeline;
- struct drm_device *dev;
+ struct drm_i915_private *i915;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
* principle, no information should leak from one context to another
@@ -315,15 +341,25 @@ struct i915_ggtt {
struct i915_address_space base;
struct io_mapping mappable; /* Mapping to our CPU mappable region */
- size_t stolen_size; /* Total size of stolen memory */
- size_t stolen_usable_size; /* Total size minus BIOS reserved */
- size_t stolen_reserved_base;
- size_t stolen_reserved_size;
- u64 mappable_end; /* End offset that we can CPU map */
phys_addr_t mappable_base; /* PA of our GMADR */
+ u64 mappable_end; /* End offset that we can CPU map */
+
+ /* Stolen memory is segmented in hardware with different portions
+ * off-limits to certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of stolen memory is reserved for
+ * hardware functions and similarly removed from the accessible range.
+ */
+ u32 stolen_size; /* Total size of stolen memory */
+ u32 stolen_usable_size; /* Total size minus reserved ranges */
+ u32 stolen_reserved_base;
+ u32 stolen_reserved_size;
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
+ void (*invalidate)(struct drm_i915_private *dev_priv);
bool do_idle_maps;
@@ -492,6 +528,8 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
+void i915_ggtt_enable_guc(struct drm_i915_private *i915);
+void i915_ggtt_disable_guc(struct drm_i915_private *i915);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
@@ -500,6 +538,7 @@ void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv,
const char *name);
+void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -520,6 +559,16 @@ int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ u64 size, u64 offset, unsigned long color,
+ unsigned int flags);
+
+int i915_gem_gtt_insert(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ u64 size, u64 alignment, unsigned long color,
+ u64 start, u64 end, unsigned int flags);
+
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
#define PIN_MAPPABLE BIT(1)
@@ -534,6 +583,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
#define PIN_HIGH BIT(9)
#define PIN_OFFSET_BIAS BIT(10)
#define PIN_OFFSET_FIXED BIT(11)
-#define PIN_OFFSET_MASK (~4095)
+#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index d09c74973cb3..933019e1b206 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -46,24 +46,12 @@ static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned int npages = obj->base.size / PAGE_SIZE;
struct sg_table *st;
struct scatterlist *sg;
+ unsigned int npages;
int max_order;
gfp_t gfp;
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- return ERR_PTR(-ENOMEM);
-
- if (sg_alloc_table(st, npages, GFP_KERNEL)) {
- kfree(st);
- return ERR_PTR(-ENOMEM);
- }
-
- sg = st->sgl;
- st->nents = 0;
-
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -79,12 +67,26 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
#endif
gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
- if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+ if (IS_I965GM(i915) || IS_I965G(i915)) {
/* 965gm cannot relocate objects above 4GiB. */
gfp &= ~__GFP_HIGHMEM;
gfp |= __GFP_DMA32;
}
+create_st:
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ npages = obj->base.size / PAGE_SIZE;
+ if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+
do {
int order = min(fls(npages) - 1, max_order);
struct page *page;
@@ -112,8 +114,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
sg = __sg_next(sg);
} while (1);
- if (i915_gem_gtt_prepare_pages(obj, st))
+ if (i915_gem_gtt_prepare_pages(obj, st)) {
+ /* Failed to dma-map, try again with single-page sg segments */
+ if (get_order(st->sgl->length)) {
+ internal_free_pages(st);
+ max_order = 0;
+ goto create_st;
+ }
goto err;
+ }
/* Mark the pages as dontneed whilst they are still pinned. As soon
* as they are unpinned they are allowed to be reaped by the shrinker,
@@ -159,11 +168,17 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
*/
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
- unsigned int size)
+ phys_addr_t size)
{
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(&i915->drm);
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index ecfefb9d42e4..bf90b07163d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -317,6 +317,29 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
return obj->tiling_and_stride & STRIDE_MASK;
}
+static inline unsigned int
+i915_gem_tile_height(unsigned int tiling)
+{
+ GEM_BUG_ON(!tiling);
+ return tiling == I915_TILING_Y ? 32 : 8;
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
+{
+ return (i915_gem_object_get_stride(obj) *
+ i915_gem_object_get_tile_height(obj));
+}
+
+int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ unsigned int tiling, unsigned int stride);
+
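As a quick worked example of the helpers above: with a 4096-byte stride, an X-tiled object (8 rows per tile) has a 32768-byte tile row, while a Y-tiled one (32 rows per tile) spans 131072 bytes. A standalone check, mirroring only the X/Y cases (the real i915_gem_tile_height() asserts that tiling is not NONE):

#include <stdio.h>

enum tiling { TILING_NONE, TILING_X, TILING_Y };

static unsigned int tile_height(enum tiling t)
{
	return t == TILING_Y ? 32 : 8;	/* rows covered by one tile */
}

static unsigned int tile_row_size(enum tiling t, unsigned int stride)
{
	return stride * tile_height(t);
}

int main(void)
{
	printf("X-tiled, 4096 stride: %u\n", tile_row_size(TILING_X, 4096)); /* 32768 */
	printf("Y-tiled, 4096 stride: %u\n", tile_row_size(TILING_Y, 4096)); /* 131072 */
	return 0;
}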
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 5af19b0bf713..b42c81b42487 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -187,20 +187,20 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine)
if (!rodata)
return 0;
- if (rodata->batch_items * 4 > 4096)
+ if (rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so)
return -ENOMEM;
- obj = i915_gem_object_create_internal(engine->i915, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_free;
}
- so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(so->vma)) {
ret = PTR_ERR(so->vma);
goto err_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index b8f403faadbb..f31deeb72703 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -62,6 +62,15 @@ static void i915_fence_release(struct dma_fence *fence)
{
struct drm_i915_gem_request *req = to_request(fence);
+ /* The request is put onto an RCU freelist (i.e. the address
+ * is immediately reused), mark the fences as being freed now.
+ * Otherwise the debugobjects for the fences are only marked as
+ * freed when the slab cache itself is freed, and so we would get
+ * caught trying to reuse dead objects.
+ */
+ i915_sw_fence_fini(&req->submit);
+ i915_sw_fence_fini(&req->execute);
+
kmem_cache_free(req->i915->requests, req);
}
@@ -197,6 +206,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active,
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
+ struct intel_engine_cs *engine = request->engine;
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -207,9 +217,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
trace_i915_gem_request_retire(request);
- spin_lock_irq(&request->engine->timeline->lock);
+ spin_lock_irq(&engine->timeline->lock);
list_del_init(&request->link);
- spin_unlock_irq(&request->engine->timeline->lock);
+ spin_unlock_irq(&engine->timeline->lock);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -257,13 +267,20 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
i915_gem_request_remove_from_client(request);
- if (request->previous_context) {
- if (i915.enable_execlists)
- intel_lr_context_unpin(request->previous_context,
- request->engine);
- }
+ /* Retirement decays the ban score as it is a sign of ctx progress */
+ if (request->ctx->ban_score > 0)
+ request->ctx->ban_score--;
- i915_gem_context_put(request->ctx);
+ /* The backing object for the context is done after switching to the
+ * *next* context. Therefore we cannot retire the previous context until
+ * the next context has already started running. However, since we
+ * cannot take the required locks at i915_gem_request_submit() we
+ * defer the unpinning of the active context to now, retirement of
+ * the subsequent request.
+ */
+ if (engine->last_retired_context)
+ engine->context_unpin(engine, engine->last_retired_context);
+ engine->last_retired_context = request->ctx;
dma_fence_signal(&request->fence);
@@ -277,6 +294,8 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&req->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_gem_request_completed(req));
+
if (list_empty(&req->link))
return;
@@ -288,26 +307,6 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
} while (tmp != req);
}
-static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
-{
- struct i915_gpu_error *error = &dev_priv->gpu_error;
-
- if (i915_terminally_wedged(error))
- return -EIO;
-
- if (i915_reset_in_progress(error)) {
- /* Non-interruptible callers can't handle -EAGAIN, hence return
- * -EIO unconditionally for these.
- */
- if (!dev_priv->mm.interruptible)
- return -EIO;
-
- return -EAGAIN;
- }
-
- return 0;
-}
-
static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
@@ -326,11 +325,11 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+ if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
while (intel_breadcrumbs_busy(i915))
cond_resched(); /* spin until threads are complete */
}
- atomic_set(&timeline->next_seqno, seqno);
+ atomic_set(&timeline->seqno, seqno);
/* Finally reset hw state */
for_each_engine(engine, i915, id)
@@ -365,11 +364,11 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
static int reserve_global_seqno(struct drm_i915_private *i915)
{
u32 active_requests = ++i915->gt.active_requests;
- u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+ u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
int ret;
/* Reservation is fine until we need to wrap around */
- if (likely(next_seqno + active_requests > next_seqno))
+ if (likely(seqno + active_requests > seqno))
return 0;
ret = i915_gem_init_global_seqno(i915, 0);
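The likely() test above is a plain unsigned-wraparound check: reserving active_requests more seqnos is fine so long as seqno + active_requests does not wrap back past seqno. The idiom in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true while reserving `count` more seqnos cannot wrap the 32-bit space */
static bool can_reserve(uint32_t seqno, uint32_t count)
{
	return seqno + count > seqno;	/* unsigned wraparound check */
}

int main(void)
{
	printf("%d\n", can_reserve(100u, 5u));			/* 1 */
	printf("%d\n", can_reserve(UINT32_MAX - 2u, 5u));	/* 0: would wrap */
	return 0;
}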
@@ -383,13 +382,13 @@ static int reserve_global_seqno(struct drm_i915_private *i915)
static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
- /* next_seqno only incremented under a mutex */
- return ++tl->next_seqno.counter;
+ /* seqno only incremented under a mutex */
+ return ++tl->seqno.counter;
}
static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
- return atomic_inc_return(&tl->next_seqno);
+ return atomic_inc_return(&tl->seqno);
}
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
@@ -502,16 +501,22 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
- * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
- * and restart.
+ * EIO if the GPU is already wedged.
+ */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return ERR_PTR(-EIO);
+
+ /* Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
*/
- ret = i915_gem_check_wedge(dev_priv);
+ ret = engine->context_pin(engine, ctx);
if (ret)
return ERR_PTR(ret);
ret = reserve_global_seqno(dev_priv);
if (ret)
- return ERR_PTR(ret);
+ goto err_unpin;
/* Move the oldest request to the slab-cache (if not in use!) */
req = list_first_entry_or_null(&engine->timeline->requests,
@@ -578,11 +583,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
req->engine = engine;
- req->ctx = i915_gem_context_get(ctx);
+ req->ctx = ctx;
/* No zalloc, must clear what we need by hand */
req->global_seqno = 0;
- req->previous_context = NULL;
req->file_priv = NULL;
req->batch = NULL;
@@ -596,10 +600,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
- if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req);
- else
- ret = intel_ring_alloc_request_extras(req);
+ ret = engine->request_alloc(req);
if (ret)
goto err_ctx;
@@ -613,10 +614,16 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
return req;
err_ctx:
- i915_gem_context_put(ctx);
+ /* Make sure we didn't add ourselves to external state before freeing */
+ GEM_BUG_ON(!list_empty(&req->active_list));
+ GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
+ GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
+
kmem_cache_free(dev_priv->requests, req);
err_unreserve:
dev_priv->gt.active_requests--;
+err_unpin:
+ engine->context_unpin(engine, ctx);
return ERR_PTR(ret);
}
@@ -822,6 +829,13 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_gem_request_add(request);
+ /* Make sure that no request gazumped us - if it was allocated after
+ * our i915_gem_request_alloc() and called __i915_add_request() before
+ * us, the timeline will hold its seqno which is later than ours.
+ */
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
+ request->fence.seqno));
+
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring
@@ -1011,8 +1025,13 @@ __i915_request_wait_for_execute(struct drm_i915_gem_request *request,
break;
}
+ if (!timeout) {
+ timeout = -ETIME;
+ break;
+ }
+
timeout = io_schedule_timeout(timeout);
- } while (timeout);
+ } while (1);
finish_wait(&request->execute.wait, &wait);
if (flags & I915_WAIT_LOCKED)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index d229f47d1028..ea511f06efaf 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -170,17 +170,6 @@ struct drm_i915_gem_request {
/** Preallocate space in the ring for the emitting the request */
u32 reserved_space;
- /**
- * Context related to the previous request.
- * As the contexts are accessed by the hardware until the switch is
- * completed to a new context, the hardware may still be writing
- * to the context object after the breadcrumb is visible. We must
- * not unpin/unbind/prune that object whilst still active and so
- * we keep the previous context pinned until the following (this)
- * request is retired.
- */
- struct i915_gem_context *previous_context;
-
/** Batch buffer related to this request if any (used for
* error state dump only).
*/
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index abc78bbfc1dc..9673bcc3b6ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -54,16 +54,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- /* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
- */
- if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
- start = 4096;
-
mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
- alignment, start, end,
- DRM_MM_SEARCH_DEFAULT);
+ ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+ size, alignment, 0,
+ start, end, DRM_MM_INSERT_BEST);
mutex_unlock(&dev_priv->mm.stolen_lock);
return ret;
@@ -73,11 +67,8 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
- alignment, 0,
- ggtt->stolen_usable_size);
+ alignment, 0, U64_MAX);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -152,7 +143,7 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_845G(dev_priv)) {
+ } else if (IS_I845G(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -202,8 +193,8 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
- !IS_G4X(dev_priv)) {
+ if (INTEL_GEN(dev_priv) <= 4 &&
+ !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
struct {
u32 start, end;
} stolen[2] = {
@@ -290,14 +281,13 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
}
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+ phys_addr_t *base, u32 *size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- unsigned long stolen_top = dev_priv->mm.stolen_base +
- ggtt->stolen_size;
+ phys_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@@ -314,7 +304,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+ phys_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -340,7 +330,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+ phys_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -359,8 +349,8 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ phys_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -386,11 +376,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+ phys_addr_t *base, u32 *size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- unsigned long stolen_top;
+ phys_addr_t stolen_top;
stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
@@ -409,11 +399,17 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long reserved_total, reserved_base = 0, reserved_size;
- unsigned long stolen_top;
+ phys_addr_t reserved_base, stolen_top;
+ u32 reserved_total, reserved_size;
+ u32 stolen_usable_start;
mutex_init(&dev_priv->mm.stolen_lock);
+ if (intel_vgpu_active(dev_priv)) {
+ DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+ return 0;
+ }
+
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
@@ -429,6 +425,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
return 0;
stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ reserved_base = 0;
+ reserved_size = 0;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
@@ -436,8 +434,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
break;
case 4:
if (IS_G4X(dev_priv))
- g4x_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ g4x_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
case 5:
/* Assume the gen6 maximum for the older platforms. */
@@ -445,21 +443,20 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_base = stolen_top - reserved_size;
break;
case 6:
- gen6_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ gen6_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
case 7:
- gen7_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ gen7_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
default:
- if (IS_BROADWELL(dev_priv) ||
- IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- bdw_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ if (IS_LP(dev_priv))
+ chv_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
else
- gen8_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ bdw_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
}
@@ -472,9 +469,10 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (reserved_base < dev_priv->mm.stolen_base ||
reserved_base + reserved_size > stolen_top) {
- DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
- reserved_base, reserved_base + reserved_size,
- dev_priv->mm.stolen_base, stolen_top);
+ phys_addr_t reserved_top = reserved_base + reserved_size;
+ DRM_DEBUG_KMS("Stolen reserved area [%pa - %pa] outside stolen memory [%pa - %pa]\n",
+ &reserved_base, &reserved_top,
+ &dev_priv->mm.stolen_base, &stolen_top);
return 0;
}
@@ -485,24 +483,21 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
+ DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
ggtt->stolen_size >> 10,
(ggtt->stolen_size - reserved_total) >> 10);
- ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
+ stolen_usable_start = 0;
+ /* WaSkipStolenMemoryFirstPage:bdw+ */
+ if (INTEL_GEN(dev_priv) >= 8)
+ stolen_usable_start = 4096;
- /*
- * Basic memrange allocator for stolen space.
- *
- * TODO: Notice that some platforms require us to not use the first page
- * of the stolen memory but their BIOSes may still put the framebuffer
- * on the first page. So we don't reserve this page for now because of
- * that. Our current solution is to just prevent new nodes from being
- * inserted on the first page - see the check we have at
- * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
- * problem later.
- */
- drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
+ ggtt->stolen_usable_size =
+ ggtt->stolen_size - reserved_total - stolen_usable_start;
+
+ /* Basic memrange allocator for stolen space. */
+ drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
+ ggtt->stolen_usable_size);
return 0;
}
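To make the final arithmetic concrete, here is a minimal sketch with purely illustrative numbers (not driver code; only the relationships mirror i915_gem_init_stolen() above):

	/* Assume 64 MiB of stolen memory with 8 MiB reserved at the top,
	 * on gen8+ where the first 4 KiB page is skipped
	 * (WaSkipStolenMemoryFirstPage). */
	u32 stolen_size = 64 << 20;
	u32 reserved_total = 8 << 20;	/* stolen_top - reserved_base */
	u32 stolen_usable_start = 4096;	/* 0 before gen8 */
	u32 stolen_usable_size =
		stolen_size - reserved_total - stolen_usable_start;

	/* The allocator then manages [4 KiB, 56 MiB): the first page and
	 * the reserved tail are never handed out. */
	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
		    stolen_usable_size);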
@@ -515,7 +510,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
+ GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -578,22 +573,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
};
static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_device *dev,
+_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_mm_node *stolen)
{
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(dev);
+ obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return NULL;
- drm_gem_private_object_init(dev, &obj->base, stolen->size);
+ drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- obj->cache_level = HAS_LLC(to_i915(dev)) ?
- I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
if (i915_gem_object_pin_pages(obj))
goto cleanup;
@@ -606,9 +600,8 @@ cleanup:
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -629,7 +622,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
return NULL;
}
- obj = _i915_gem_object_create_stolen(dev, stolen);
+ obj = _i915_gem_object_create_stolen(dev_priv, stolen);
if (obj)
return obj;
@@ -639,12 +632,11 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
u32 stolen_offset,
u32 gtt_offset,
u32 size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
@@ -654,14 +646,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
stolen_offset, gtt_offset, size);
/* KISS and expect everything to be page-aligned */
- if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
- WARN_ON(stolen_offset & 4095))
+ if (WARN_ON(size == 0) ||
+ WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+ WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
return NULL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -679,7 +672,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
return NULL;
}
- obj = _i915_gem_object_create_stolen(dev, stolen);
+ obj = _i915_gem_object_create_stolen(dev_priv, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
i915_gem_stolen_remove_node(dev_priv, stolen);
@@ -695,7 +688,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (ret)
goto err;
- vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
+ vma = i915_vma_instance(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
@@ -706,15 +699,16 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* setting up the GTT space. The actual reservation will occur
* later.
*/
- vma->node.start = gtt_offset;
- vma->node.size = size;
-
- ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+ ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+ size, gtt_offset, obj->cache_level,
+ 0);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err_pages;
}
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c85e7b06bdba..974ac08df473 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -34,8 +34,8 @@
/**
* DOC: buffer object tiling
*
- * i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface to
- * declare fence register requirements.
+ * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the userspace
+ * interface to declare fence register requirements.
*
* In principle GEM doesn't care at all about the internal data layout of an
* object, and hence it also doesn't care about tiling or swizzling. There are two
@@ -58,86 +58,147 @@
* involvement.
*/
+/**
+ * i915_gem_fence_size - required global GTT size for a fence
+ * @i915: i915 device
+ * @size: object size
+ * @tiling: tiling mode
+ * @stride: tiling stride
+ *
+ * Return the required global GTT size for a fence (view of a tiled object),
+ * taking into account potential fence register mapping.
+ */
+u32 i915_gem_fence_size(struct drm_i915_private *i915,
+ u32 size, unsigned int tiling, unsigned int stride)
+{
+ u32 ggtt_size;
+
+ GEM_BUG_ON(!size);
+
+ if (tiling == I915_TILING_NONE)
+ return size;
+
+ GEM_BUG_ON(!stride);
+
+ if (INTEL_GEN(i915) >= 4) {
+ stride *= i915_gem_tile_height(tiling);
+ GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
+ return roundup(size, stride);
+ }
+
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (IS_GEN3(i915))
+ ggtt_size = 1024*1024;
+ else
+ ggtt_size = 512*1024;
+
+ while (ggtt_size < size)
+ ggtt_size <<= 1;
+
+ return ggtt_size;
+}
+
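As a worked example of the pre-gen4 branch above, a standalone sketch (same rules, illustrative values, not driver code):

	/* Pre-gen4 fences cover a power-of-two region: at least 1 MiB on
	 * gen3, 512 KiB on gen2, grown until the object fits. */
	static u32 legacy_fence_region_size(u32 size, bool is_gen3)
	{
		u32 ggtt_size = is_gen3 ? 1024 * 1024 : 512 * 1024;

		while (ggtt_size < size)
			ggtt_size <<= 1;	/* next power of two */

		return ggtt_size;	/* a 700 KiB object on gen3 -> 1 MiB */
	}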
+/**
+ * i915_gem_fence_alignment - required global GTT alignment for a fence
+ * @i915: i915 device
+ * @size: object size
+ * @tiling: tiling mode
+ * @stride: tiling stride
+ *
+ * Return the required global GTT alignment for a fence (a view of a tiled
+ * object), taking into account potential fence register mapping.
+ */
+u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
+ unsigned int tiling, unsigned int stride)
+{
+ GEM_BUG_ON(!size);
+
+ /*
+ * Minimum alignment is 4k (GTT page size), but might be greater
+ * if a fence register is needed for the object.
+ */
+ if (tiling == I915_TILING_NONE)
+ return I915_GTT_MIN_ALIGNMENT;
+
+ if (INTEL_GEN(i915) >= 4)
+ return I965_FENCE_PAGE;
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ return i915_gem_fence_size(i915, size, tiling, stride);
+}
+
/* Check pitch constraints for all chips & tiling formats */
static bool
-i915_tiling_ok(struct drm_i915_private *dev_priv,
- int stride, int size, int tiling_mode)
+i915_tiling_ok(struct drm_i915_gem_object *obj,
+ unsigned int tiling, unsigned int stride)
{
- int tile_width;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int tile_width;
/* Linear is always fine */
- if (tiling_mode == I915_TILING_NONE)
+ if (tiling == I915_TILING_NONE)
return true;
- if (tiling_mode > I915_TILING_LAST)
+ if (tiling > I915_TILING_LAST)
return false;
- if (IS_GEN2(dev_priv) ||
- (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
- tile_width = 128;
- else
- tile_width = 512;
-
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (INTEL_GEN(i915) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (INTEL_GEN(dev_priv) >= 4) {
+ } else if (INTEL_GEN(i915) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
- if (IS_GEN3(dev_priv)) {
- if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ if (IS_GEN3(i915)) {
+ if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
- if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 19)
return false;
}
}
- if (stride < tile_width)
+ if (IS_GEN2(i915) ||
+ (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ if (!stride || !IS_ALIGNED(stride, tile_width))
return false;
/* 965+ just needs multiples of tile width */
- if (INTEL_GEN(dev_priv) >= 4) {
- if (stride & (tile_width - 1))
- return false;
+ if (INTEL_GEN(i915) >= 4)
return true;
- }
/* Pre-965 needs power of two tile widths */
- if (stride & (stride - 1))
- return false;
-
- return true;
+ return is_power_of_2(stride);
}
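A few concrete outcomes of the checks above (illustrative only):

	/*
	 *   gen4+, X-tiled, stride 1536: OK   (multiple of the 512-byte tile width)
	 *   gen3,  X-tiled, stride 1536: FAIL (not a power of two)
	 *   gen3,  X-tiled, stride 2048: OK   (power of two, aligned to 512)
	 *   gen4+, Y-tiled, stride 384:  OK only where Y tiles are 128 bytes wide
	 */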
-static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
+static bool i915_vma_fence_prepare(struct i915_vma *vma,
+ int tiling_mode, unsigned int stride)
{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
- u32 size;
+ struct drm_i915_private *i915 = vma->vm->i915;
+ u32 size, alignment;
if (!i915_vma_is_map_and_fenceable(vma))
return true;
- if (INTEL_GEN(dev_priv) == 3) {
- if (vma->node.start & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (vma->node.start & ~I830_FENCE_START_MASK)
- return false;
- }
-
- size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
+ size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
if (vma->node.size < size)
return false;
- if (vma->node.start & (size - 1))
+ alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
+ if (!IS_ALIGNED(vma->node.start, alignment))
return false;
return true;
@@ -145,20 +206,20 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
/* Make the current GTT allocation valid for the change in tiling. */
static int
-i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
+i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
+ int tiling_mode, unsigned int stride)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
if (tiling_mode == I915_TILING_NONE)
return 0;
- if (INTEL_GEN(dev_priv) >= 4)
- return 0;
-
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (i915_vma_fence_prepare(vma, tiling_mode))
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
+ if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
ret = i915_vma_unbind(vma);
@@ -169,8 +230,100 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
return 0;
}
+int
+i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ unsigned int tiling, unsigned int stride)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+ int err;
+
+ /* Make sure we don't cross-contaminate obj->tiling_and_stride */
+ BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
+
+ GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
+ GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ if ((tiling | stride) == obj->tiling_and_stride)
+ return 0;
+
+ if (obj->framebuffer_references)
+ return -EBUSY;
+
+ /* We need to rebind the object if its current allocation
+ * no longer meets the alignment restrictions for its new
+ * tiling mode. Otherwise we can just leave it alone, but
+ * need to ensure that any fence register is updated before
+ * the next fenced (either through the GTT or by the BLT unit
+ * on older GPUs) access.
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
+ */
+
+ err = i915_gem_object_fence_prepare(obj, tiling, stride);
+ if (err)
+ return err;
+
+ /* If the memory has unknown (i.e. varying) swizzling, we pin the
+ * pages to prevent them being swapped out and causing corruption
+ * due to the change in swizzling.
+ */
+ mutex_lock(&obj->mm.lock);
+ if (obj->mm.pages &&
+ obj->mm.madv == I915_MADV_WILLNEED &&
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ if (tiling == I915_TILING_NONE) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (!i915_gem_object_is_tiled(obj)) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+ }
+ mutex_unlock(&obj->mm.lock);
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
+ vma->fence_size =
+ i915_gem_fence_size(i915, vma->size, tiling, stride);
+ vma->fence_alignment =
+ i915_gem_fence_alignment(i915,
+ vma->size, tiling, stride);
+
+ if (vma->fence)
+ vma->fence->dirty = true;
+ }
+
+ obj->tiling_and_stride = tiling | stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
+
+ /* Try to preallocate memory required to save swizzling on put-pages */
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ if (!obj->bit_17) {
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
+ sizeof(long), GFP_KERNEL);
+ }
+ } else {
+ kfree(obj->bit_17);
+ obj->bit_17 = NULL;
+ }
+
+ return 0;
+}
+
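The bit-17 bookkeeping above costs one bit per page; a quick sizing sketch with illustrative numbers:

	/* An 8 MiB object spans 2048 pages, so the save area is
	 * BITS_TO_LONGS(2048) = 32 longs (256 bytes on a 64-bit kernel):
	 * one bit per page to record bit 17 of its DMA address at
	 * swap-out. */
	obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
			      sizeof(long), GFP_KERNEL);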
/**
- * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode
* @dev: DRM device
* @data: data pointer for the ioctl
* @file: DRM file for the ioctl call
@@ -184,30 +337,19 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
* Zero on success, negative errno on failure.
*/
int
-i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- int err = 0;
-
- /* Make sure we don't cross-contaminate obj->tiling_and_stride */
- BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
+ int err;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- if (!i915_tiling_ok(dev_priv,
- args->stride, obj->base.size, args->tiling_mode)) {
- i915_gem_object_put(obj);
- return -EINVAL;
- }
-
- mutex_lock(&dev->struct_mutex);
- if (obj->pin_display || obj->framebuffer_references) {
- err = -EBUSY;
+ if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
+ err = -EINVAL;
goto err;
}
@@ -216,9 +358,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
else
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -240,79 +382,24 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
- if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
- args->stride != i915_gem_object_get_stride(obj)) {
- /* We need to rebind the object if its current allocation
- * no longer meets the alignment restrictions for its new
- * tiling mode. Otherwise we can just leave it alone, but
- * need to ensure that any fence register is updated before
- * the next fenced (either through the GTT or by the BLT unit
- * on older GPUs) access.
- *
- * After updating the tiling parameters, we then flag whether
- * we need to update an associated fence register. Note this
- * has to also include the unfenced register the GPU uses
- * whilst executing a fenced command for an untiled object.
- */
+ err = mutex_lock_interruptible(&dev->struct_mutex);
+ if (err)
+ goto err;
- err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
- if (!err) {
- struct i915_vma *vma;
-
- mutex_lock(&obj->mm.lock);
- if (obj->mm.pages &&
- obj->mm.madv == I915_MADV_WILLNEED &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (args->tiling_mode == I915_TILING_NONE) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
- }
- if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
- }
- mutex_unlock(&obj->mm.lock);
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!vma->fence)
- continue;
-
- vma->fence->dirty = true;
- }
- obj->tiling_and_stride =
- args->stride | args->tiling_mode;
-
- /* Force the fence to be reacquired for GTT access */
- i915_gem_release_mmap(obj);
- }
- }
- /* we have to maintain this existing ABI... */
+ err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
args->tiling_mode = i915_gem_object_get_tiling(obj);
- /* Try to preallocate memory required to save swizzling on put-pages */
- if (i915_gem_object_needs_bit17_swizzle(obj)) {
- if (obj->bit_17 == NULL) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
- sizeof(long), GFP_KERNEL);
- }
- } else {
- kfree(obj->bit_17);
- obj->bit_17 = NULL;
- }
-
err:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
-
return err;
}
/**
- * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode
* @dev: DRM device
* @data: data pointer for the ioctl
* @file: DRM file for the ioctl call
@@ -325,8 +412,8 @@ err:
* Zero on success, negative errno on failure.
*/
int
-i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index bf8a471b61e6..b596ca7ee058 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -81,10 +81,18 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
&class, "&global_timeline->lock");
}
-void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
{
- lockdep_assert_held(&tl->i915->drm.struct_mutex);
+ int i;
- list_del(&tl->link);
- kfree(tl->name);
+ lockdep_assert_held(&timeline->i915->drm.struct_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+ struct intel_timeline *tl = &timeline->engine[i];
+
+ GEM_BUG_ON(!list_empty(&tl->requests));
+ }
+
+ list_del(&timeline->link);
+ kfree(timeline->name);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index 98d99a62b4ae..f2e51f42cc2f 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -56,7 +56,7 @@ struct intel_timeline {
struct i915_gem_timeline {
struct list_head link;
- atomic_t next_seqno;
+ atomic_t seqno;
struct drm_i915_private *i915;
const char *name;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d068af2ec3a3..6a8fa085b74e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -784,7 +784,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return -ENODEV;
}
- obj = i915_gem_object_alloc(dev);
+ obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index ae84aa4b1467..9cd22cda17af 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -121,6 +121,7 @@ static void __i915_error_advance(struct drm_i915_error_state_buf *e,
e->pos += len;
}
+__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
const char *f, va_list args)
{
@@ -176,9 +177,14 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
-static bool compress_init(struct z_stream_s *zstream)
+struct compress {
+ struct z_stream_s zstream;
+ void *tmp;
+};
+
+static bool compress_init(struct compress *c)
{
- memset(zstream, 0, sizeof(*zstream));
+ struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
zstream->workspace =
kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
@@ -191,14 +197,22 @@ static bool compress_init(struct z_stream_s *zstream)
return false;
}
+ c->tmp = NULL;
+ if (i915_has_memcpy_from_wc())
+ c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+
return true;
}
-static int compress_page(struct z_stream_s *zstream,
+static int compress_page(struct compress *c,
void *src,
struct drm_i915_error_object *dst)
{
+ struct z_stream_s *zstream = &c->zstream;
+
zstream->next_in = src;
+ if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+ zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
do {
@@ -226,9 +240,11 @@ static int compress_page(struct z_stream_s *zstream,
return 0;
}
-static void compress_fini(struct z_stream_s *zstream,
+static void compress_fini(struct compress *c,
struct drm_i915_error_object *dst)
{
+ struct z_stream_s *zstream = &c->zstream;
+
if (dst) {
zlib_deflate(zstream, Z_FINISH);
dst->unused = zstream->avail_out;
@@ -236,6 +252,9 @@ static void compress_fini(struct z_stream_s *zstream,
zlib_deflateEnd(zstream);
kfree(zstream->workspace);
+
+ if (c->tmp)
+ free_page((unsigned long)c->tmp);
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
@@ -245,28 +264,34 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
-static bool compress_init(struct z_stream_s *zstream)
+struct compress {
+};
+
+static bool compress_init(struct compress *c)
{
return true;
}
-static int compress_page(struct z_stream_s *zstream,
+static int compress_page(struct compress *c,
void *src,
struct drm_i915_error_object *dst)
{
unsigned long page;
+ void *ptr;
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
return -ENOMEM;
- dst->pages[dst->page_count++] =
- memcpy((void *)page, src, PAGE_SIZE);
+ ptr = (void *)page;
+ if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+ memcpy(ptr, src, PAGE_SIZE);
+ dst->pages[dst->page_count++] = ptr;
return 0;
}
-static void compress_fini(struct z_stream_s *zstream,
+static void compress_fini(struct compress *c,
struct drm_i915_error_object *dst)
{
}
@@ -316,24 +341,6 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
}
-static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
-{
- switch (a) {
- case HANGCHECK_IDLE:
- return "idle";
- case HANGCHECK_WAIT:
- return "wait";
- case HANGCHECK_ACTIVE:
- return "active";
- case HANGCHECK_KICK:
- return "kick";
- case HANGCHECK_HUNG:
- return "hung";
- }
-
- return "unknown";
-}
-
static void error_print_instdone(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
{
@@ -370,8 +377,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
- prefix, erq->pid,
+ err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+ prefix, erq->pid, erq->ban_score,
erq->context, erq->seqno,
jiffies_to_msecs(jiffies - erq->jiffies),
erq->head, erq->tail);
@@ -441,9 +448,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " waiting: %s\n", yesno(ee->waiting));
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
- err_printf(m, " hangcheck: %s [%d]\n",
- hangcheck_action_to_str(ee->hangcheck_action),
- ee->hangcheck_score);
+ err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
+ err_printf(m, " hangcheck action: %s\n",
+ hangcheck_action_to_str(ee->hangcheck_action));
+ err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
+ ee->hangcheck_timestamp,
+ jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
+
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
}
@@ -528,11 +539,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
- struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
+ struct drm_i915_private *dev_priv = error_priv->i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
- int max_hangcheck_score;
int i, j;
if (!error) {
@@ -549,22 +559,20 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Uptime: %ld s %ld us\n",
error->uptime.tv_sec, error->uptime.tv_usec);
err_print_capabilities(m, &error->device_info);
- max_hangcheck_score = 0;
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (error->engine[i].hangcheck_score > max_hangcheck_score)
- max_hangcheck_score = error->engine[i].hangcheck_score;
- }
+
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (error->engine[i].hangcheck_score == max_hangcheck_score &&
+ if (error->engine[i].hangcheck_stalled &&
error->engine[i].pid != -1) {
- err_printf(m, "Active process (on ring %s): %s [%d]\n",
+ err_printf(m, "Active process (on ring %s): %s [%d], context bans %d\n",
engine_str(i),
error->engine[i].comm,
- error->engine[i].pid);
+ error->engine[i].pid,
+ error->engine[i].context_bans);
}
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
+ err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
err_printf(m, "PCI Subsystem: %04x:%04x\n",
@@ -651,9 +659,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (obj) {
err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
- err_printf(m, " (submitted by %s [%d])",
+ err_printf(m, " (submitted by %s [%d], bans %d)",
ee->comm,
- ee->pid);
+ ee->pid,
+ ee->context_bans);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@@ -801,7 +810,7 @@ i915_error_object_create(struct drm_i915_private *i915,
struct i915_ggtt *ggtt = &i915->ggtt;
const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
- struct z_stream_s zstream;
+ struct compress compress;
unsigned long num_pages;
struct sgt_iter iter;
dma_addr_t dma;
@@ -821,7 +830,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->page_count = 0;
dst->unused = 0;
- if (!compress_init(&zstream)) {
+ if (!compress_init(&compress)) {
kfree(dst);
return NULL;
}
@@ -834,7 +843,7 @@ i915_error_object_create(struct drm_i915_private *i915,
I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
- ret = compress_page(&zstream, (void __force *)s, dst);
+ ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
if (ret)
@@ -849,7 +858,7 @@ unwind:
dst = NULL;
out:
- compress_fini(&zstream, dst);
+ compress_fini(&compress, dst);
ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
return dst;
}
@@ -941,7 +950,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
* strictly a client bug. Use instdone to differentiate those somewhat.
*/
for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
+ if (error->engine[i].hangcheck_stalled) {
if (engine_id)
*engine_id = i;
@@ -1159,8 +1168,9 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->hws = I915_READ(mmio);
}
- ee->hangcheck_score = engine->hangcheck.score;
+ ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
+ ee->hangcheck_stalled = engine->hangcheck.stalled;
if (USES_PPGTT(dev_priv)) {
int i;
@@ -1188,6 +1198,7 @@ static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
+ erq->ban_score = request->ctx->ban_score;
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
@@ -1321,7 +1332,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
}
error->simulated |=
- request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
+ i915_gem_context_no_error_capture(request->ctx);
ee->rq_head = request->head;
ee->rq_post = request->postfix;
@@ -1659,9 +1670,8 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
kref_put(&error_priv->error->ref, i915_error_state_free);
}
-void i915_destroy_error_state(struct drm_device *dev)
+void i915_destroy_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_error_state *error;
spin_lock_irq(&dev_priv->gpu_error.lock);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index a47e1e4aec03..35cf9918d09a 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -61,18 +61,27 @@
#define DMA_ADDRESS_SPACE_GTT (8 << 16)
#define DMA_COPY_SIZE _MMIO(0xc310)
#define DMA_CTRL _MMIO(0xc314)
+#define HUC_UKERNEL (1<<9)
#define UOS_MOVE (1<<4)
#define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
+#define HUC_LOADING_AGENT_VCR (0<<1)
+#define HUC_LOADING_AGENT_GUC (1<<1)
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
+#define HUC_STATUS2 _MMIO(0xD3B0)
+#define HUC_FW_VERIFIED (1<<7)
+
/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050)
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
+/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
+#define GUC_GGTT_TOP 0xFEE00000
+
#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
@@ -100,8 +109,8 @@
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
GUC_ENABLE_MIA_CLOCK_GATING)
-#define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
-#define HOST2GUC_TRIGGER (1<<0)
+#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
+#define GUC_SEND_TRIGGER (1<<0)
#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 4462112725ef..8ced9e26f075 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -21,12 +21,9 @@
* IN THE SOFTWARE.
*
*/
-#include <linux/firmware.h>
#include <linux/circ_buf.h>
-#include <linux/debugfs.h>
-#include <linux/relay.h>
#include "i915_drv.h"
-#include "intel_guc.h"
+#include "intel_uc.h"
/**
* DOC: GuC-based command submission
@@ -49,7 +46,7 @@
* Firmware writes a success/fail code back to the action register after
* processing the request. The kernel driver polls waiting for this update and
* then proceeds.
- * See host2guc_action()
+ * See intel_guc_send()
*
* Doorbells:
* Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
@@ -66,141 +63,29 @@
*/
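With the rework in this patch, each host-to-GuC request becomes a small u32 array handed to intel_guc_send(); a sketch of the calling convention, mirroring guc_allocate_doorbell() below:

	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,	/* opcode in action[0] */
		client->ctx_index			/* parameters follow */
	};

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	/* 0 on success, -EIO if the GuC reported failure, -ETIMEDOUT if no
	 * response arrived in time (per the replaced host2guc_action()). */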
/*
- * Read GuC command/status register (SOFT_SCRATCH_0)
- * Return true if it contains a response rather than a command
- */
-static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
- u32 *status)
-{
- u32 val = I915_READ(SOFT_SCRATCH(0));
- *status = val;
- return GUC2HOST_IS_RESPONSE(val);
-}
-
-static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 status;
- int i;
- int ret;
-
- if (WARN_ON(len < 1 || len > 15))
- return -EINVAL;
-
- mutex_lock(&guc->action_lock);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-
- dev_priv->guc.action_count += 1;
- dev_priv->guc.action_cmd = data[0];
-
- for (i = 0; i < len; i++)
- I915_WRITE(SOFT_SCRATCH(i), data[i]);
-
- POSTING_READ(SOFT_SCRATCH(i - 1));
-
- I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
-
- /*
- * Fast commands should complete in less than 10us, so sample quickly
- * up to that length of time, then switch to a slower sleep-wait loop.
- * No HOST2GUC command should ever take longer than 10ms.
- */
- ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
- if (ret)
- ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
- if (status != GUC2HOST_STATUS_SUCCESS) {
- /*
- * Either the GuC explicitly returned an error (which
- * we convert to -EIO here) or no response at all was
- * received within the timeout limit (-ETIMEDOUT)
- */
- if (ret != -ETIMEDOUT)
- ret = -EIO;
-
- DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
- data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
-
- dev_priv->guc.action_fail += 1;
- dev_priv->guc.action_err = ret;
- }
- dev_priv->guc.action_status = status;
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&guc->action_lock);
-
- return ret;
-}
-
-/*
* Tell the GuC to allocate or deallocate a specific doorbell
*/
-static int host2guc_allocate_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
-{
- u32 data[2];
-
- data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
- data[1] = client->ctx_index;
-
- return host2guc_action(guc, data, 2);
-}
-
-static int host2guc_release_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
-{
- u32 data[2];
-
- data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
- data[1] = client->ctx_index;
-
- return host2guc_action(guc, data, 2);
-}
-
-static int host2guc_sample_forcewake(struct intel_guc *guc,
- struct i915_guc_client *client)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 data[2];
-
- data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
- /* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- data[1] = 0;
- else
- /* bit 0 and 1 are for Render and Media domain separately */
- data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
-
- return host2guc_action(guc, data, ARRAY_SIZE(data));
-}
-
-static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
-{
- u32 data[1];
-
- data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;
-
- return host2guc_action(guc, data, 1);
-}
-
-static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
+static int guc_allocate_doorbell(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
- u32 data[2];
+ u32 action[] = {
+ INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
+ client->ctx_index
+ };
- data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
- data[1] = 0;
-
- return host2guc_action(guc, data, 2);
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
+static int guc_release_doorbell(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
- u32 data[2];
-
- data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
- data[1] = control_val;
+ u32 action[] = {
+ INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
+ client->ctx_index
+ };
- return host2guc_action(guc, data, 2);
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/*
@@ -226,7 +111,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
test_bit(client->doorbell_id, doorbell_bitmap)) {
/* Deactivate the old doorbell */
doorbell->db_status = GUC_DOORBELL_DISABLED;
- (void)host2guc_release_doorbell(guc, client);
+ (void)guc_release_doorbell(guc, client);
__clear_bit(client->doorbell_id, doorbell_bitmap);
}
@@ -247,16 +132,9 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
/* Activate the new doorbell */
__set_bit(new_id, doorbell_bitmap);
- doorbell->cookie = 0;
doorbell->db_status = GUC_DOORBELL_ENABLED;
- return host2guc_allocate_doorbell(guc, client);
-}
-
-static int guc_init_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client,
- uint16_t db_id)
-{
- return guc_update_doorbell_id(guc, client, db_id);
+ doorbell->cookie = client->doorbell_cookie;
+ return guc_allocate_doorbell(guc, client);
}
static void guc_disable_doorbell(struct intel_guc *guc,
@@ -298,7 +176,7 @@ select_doorbell_register(struct intel_guc *guc, uint32_t priority)
* Select, assign and release doorbell cachelines
*
* These functions track which doorbell cachelines are in use.
- * The data they manipulate is protected by the host2guc lock.
+ * The data they manipulate is protected by the intel_guc_send lock.
*/
static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
@@ -390,11 +268,11 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
/* The state page is after PPHWSP */
lrc->ring_lcra =
- i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
- lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
+ lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
@@ -410,7 +288,7 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
- gfx_addr = i915_ggtt_offset(client->vma);
+ gfx_addr = guc_ggtt_offset(client->vma);
desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
desc.db_trigger_cpu =
@@ -464,22 +342,23 @@ static void guc_ctx_desc_fini(struct intel_guc *guc,
int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
- struct i915_guc_client *gc = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
+ struct i915_guc_client *client = request->i915->guc.execbuf_client;
+ struct guc_process_desc *desc = client->vaddr +
+ client->proc_desc_offset;
u32 freespace;
int ret;
- spin_lock(&gc->wq_lock);
- freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
- freespace -= gc->wq_rsvd;
+ spin_lock(&client->wq_lock);
+ freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
+ freespace -= client->wq_rsvd;
if (likely(freespace >= wqi_size)) {
- gc->wq_rsvd += wqi_size;
+ client->wq_rsvd += wqi_size;
ret = 0;
} else {
- gc->no_wq_space++;
+ client->no_wq_space++;
ret = -EAGAIN;
}
- spin_unlock(&gc->wq_lock);
+ spin_unlock(&client->wq_lock);
return ret;
}
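The free-space check uses the circular-buffer helper from <linux/circ_buf.h>; a worked example with illustrative values:

	/* CIRC_SPACE(head, tail, size) = (tail - head - 1) & (size - 1)
	 * for power-of-two sizes. With wq_size = 8192, wq_tail (producer)
	 * = 7168 and desc->head (consumer) = 512:
	 *
	 *   freespace = (512 - 7168 - 1) & 8191 = 1535 bytes
	 *
	 * from which wq_rsvd, the space promised to not-yet-submitted
	 * requests, is subtracted before comparing against wqi_size. */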
@@ -487,17 +366,17 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
- struct i915_guc_client *gc = request->i915->guc.execbuf_client;
+ struct i915_guc_client *client = request->i915->guc.execbuf_client;
- GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);
+ GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
- spin_lock(&gc->wq_lock);
- gc->wq_rsvd -= wqi_size;
- spin_unlock(&gc->wq_lock);
+ spin_lock(&client->wq_lock);
+ client->wq_rsvd -= wqi_size;
+ spin_unlock(&client->wq_lock);
}
/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct i915_guc_client *gc,
+static void guc_wq_item_append(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
/* wqi_len is in DWords, and does not include the one-word header */
@@ -508,10 +387,10 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
struct guc_wq_item *wqi;
u32 freespace, tail, wq_off;
- desc = gc->vaddr + gc->proc_desc_offset;
+ desc = client->vaddr + client->proc_desc_offset;
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
- freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */
@@ -528,17 +407,17 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
* workqueue buffer dw by dw.
*/
BUILD_BUG_ON(wqi_size != 16);
- GEM_BUG_ON(gc->wq_rsvd < wqi_size);
+ GEM_BUG_ON(client->wq_rsvd < wqi_size);
/* postincrement WQ tail for next time */
- wq_off = gc->wq_tail;
+ wq_off = client->wq_tail;
GEM_BUG_ON(wq_off & (wqi_size - 1));
- gc->wq_tail += wqi_size;
- gc->wq_tail &= gc->wq_size - 1;
- gc->wq_rsvd -= wqi_size;
+ client->wq_tail += wqi_size;
+ client->wq_tail &= client->wq_size - 1;
+ client->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
- wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
+ wqi = client->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
@@ -553,30 +432,30 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
wqi->fence_id = rq->global_seqno;
}
-static int guc_ring_doorbell(struct i915_guc_client *gc)
+static int guc_ring_doorbell(struct i915_guc_client *client)
{
struct guc_process_desc *desc;
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = gc->vaddr + gc->proc_desc_offset;
+ desc = client->vaddr + client->proc_desc_offset;
/* Update the tail so it is visible to GuC */
- desc->tail = gc->wq_tail;
+ desc->tail = client->wq_tail;
/* current cookie */
db_cmp.db_status = GUC_DOORBELL_ENABLED;
- db_cmp.cookie = gc->cookie;
+ db_cmp.cookie = client->doorbell_cookie;
/* cookie to be updated */
db_exc.db_status = GUC_DOORBELL_ENABLED;
- db_exc.cookie = gc->cookie + 1;
+ db_exc.cookie = client->doorbell_cookie + 1;
if (db_exc.cookie == 0)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = gc->vaddr + gc->doorbell_offset;
+ db = client->vaddr + client->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@@ -586,7 +465,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
/* if the exchange was successfully executed */
if (db_ret.value_qw == db_cmp.value_qw) {
/* db was successfully rung */
- gc->cookie = db_exc.cookie;
+ client->doorbell_cookie = db_exc.cookie;
ret = 0;
break;
}
@@ -609,12 +488,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
}
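The exchange inside the retry loop (elided by the hunk above) is a lock-free cmpxchg on the qword shared with the firmware; a sketch of the idea, assuming atomic64_cmpxchg() semantics:

	/* Swap the expected (status, cookie) qword for the one carrying
	 * the bumped cookie; if the returned value matches what we
	 * expected, our write landed and the doorbell was rung. */
	db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
					   db_cmp.value_qw, db_exc.value_qw);
	if (db_ret.value_qw == db_cmp.value_qw)
		client->doorbell_cookie = db_exc.cookie;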
/**
- * i915_guc_submit() - Submit commands through GuC
+ * __i915_guc_submit() - Submit commands through GuC
* @rq: request associated with the commands
*
- * Return: 0 on success, otherwise an errno.
- * (Note: nonzero really shouldn't happen!)
- *
* The caller must have already called i915_guc_wq_reserve() above with
* a result of 0 (success), guaranteeing that there is space in the work
* queue for the new request, so enqueuing the item cannot fail.
@@ -626,7 +502,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
* The only error here arises if the doorbell hardware isn't functioning
* as expected, which really shouldn't happen.
*/
-static void i915_guc_submit(struct drm_i915_gem_request *rq)
+static void __i915_guc_submit(struct drm_i915_gem_request *rq)
{
struct drm_i915_private *dev_priv = rq->i915;
struct intel_engine_cs *engine = rq->engine;
@@ -635,17 +511,6 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
struct i915_guc_client *client = guc->execbuf_client;
int b_ret;
- /* We keep the previous context alive until we retire the following
- * request. This ensures that the context object is still pinned
- * for any residual writes the HW makes into it on the context switch
- * into the next object following the breadcrumb. Otherwise, we may
- * retire the context too early.
- */
- rq->previous_context = engine->last_context;
- engine->last_context = rq->ctx;
-
- i915_gem_request_submit(rq);
-
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
@@ -665,6 +530,12 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
spin_unlock(&client->wq_lock);
}
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
+{
+ i915_gem_request_submit(rq);
+ __i915_guc_submit(rq);
+}
+
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -672,7 +543,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
*/
/**
- * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
* @guc: the guc
* @size: size of area to allocate (both virtual space and memory)
*
@@ -684,18 +555,18 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
*
* Return: A i915_vma if successful, otherwise an ERR_PTR.
*/
-static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret;
- obj = i915_gem_object_create(&dev_priv->drm, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err;
@@ -706,9 +577,6 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
goto err;
}
- /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-
return vma;
err:
@@ -779,8 +647,7 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
uint16_t db_id;
int i, err;
- /* Save client's original doorbell selection */
- db_id = client->doorbell_id;
+ guc_disable_doorbell(guc, client);
for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
/* Skip if doorbell is OK */
@@ -793,7 +660,9 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
i, err);
}
- /* Restore to original value */
+ db_id = select_doorbell_register(guc, client->priority);
+ WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);
+
err = guc_update_doorbell_id(guc, client, db_id);
if (err)
DRM_WARN("Failed to restore doorbell to %d, err %d\n",
@@ -847,7 +716,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
/* The first page is doorbell/proc_desc. The following two pages are the wq. */
- vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
+ vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
if (IS_ERR(vma))
goto err;
@@ -883,8 +752,13 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
guc_proc_desc_init(guc, client);
guc_ctx_desc_init(guc, client);
- if (guc_init_doorbell(guc, client, db_id))
- goto err;
+
+ /* For runtime client allocation we need to enable the doorbell. Not
+ * required yet for the static execbuf_client as this special kernel
+ * client is enabled from i915_guc_submission_enable().
+ *
+ * guc_update_doorbell_id(guc, client, db_id);
+ */
DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
priority, client, client->engines, client->ctx_index);
@@ -898,488 +772,7 @@ err:
return NULL;
}
-/*
- * Sub buffer switch callback. Called whenever relay has to switch to a new
- * sub buffer, relay stays on the same sub buffer if 0 is returned.
- */
-static int subbuf_start_callback(struct rchan_buf *buf,
- void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
-{
- /* Use no-overwrite mode by default, where relay will stop accepting
- * new data if there are no empty sub buffers left.
- * There is no strict synchronization enforced by relay between Consumer
- * and Producer. In overwrite mode, there is a possibility of getting
- * inconsistent/garbled data, as the producer could be writing onto the
- * same sub buffer from which Consumer is reading. This can't be avoided
- * unless Consumer is fast enough and can always run in tandem with
- * Producer.
- */
- if (relay_buf_full(buf))
- return 0;
-
- return 1;
-}
-
-/*
- * file_create() callback. Creates relay file in debugfs.
- */
-static struct dentry *create_buf_file_callback(const char *filename,
- struct dentry *parent,
- umode_t mode,
- struct rchan_buf *buf,
- int *is_global)
-{
- struct dentry *buf_file;
-
- /* This is to enable the use of a single buffer for the relay channel and
- * correspondingly have a single file exposed to User, through which
- * it can collect the logs in order without any post-processing.
- * Need to set 'is_global' even if parent is NULL for early logging.
- */
- *is_global = 1;
-
- if (!parent)
- return NULL;
-
- /* Not using the channel filename passed as an argument, since for each
- * channel relay appends the corresponding CPU number to the filename
- * passed in relay_open(). This should be fine as relay just needs a
- * dentry of the file associated with the channel buffer and that file's
- * name need not be the same as the filename passed as an argument.
- */
- buf_file = debugfs_create_file("guc_log", mode,
- parent, buf, &relay_file_operations);
- return buf_file;
-}
-
-/*
- * file_remove() default callback. Removes relay file in debugfs.
- */
-static int remove_buf_file_callback(struct dentry *dentry)
-{
- debugfs_remove(dentry);
- return 0;
-}
-
-/* relay channel callbacks */
-static struct rchan_callbacks relay_callbacks = {
- .subbuf_start = subbuf_start_callback,
- .create_buf_file = create_buf_file_callback,
- .remove_buf_file = remove_buf_file_callback,
-};
-
-static void guc_log_remove_relay_file(struct intel_guc *guc)
-{
- relay_close(guc->log.relay_chan);
-}
-
-static int guc_log_create_relay_channel(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct rchan *guc_log_relay_chan;
- size_t n_subbufs, subbuf_size;
-
- /* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = guc->log.vma->obj->base.size;
-
- /* Store up to 8 snapshots, which is large enough to buffer sufficient
- * boot time logs and provides enough leeway to User, in terms of
- * latency, for consuming the logs from relay. Also doesn't take
- * up too much memory.
- */
- n_subbufs = 8;
-
- guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
- n_subbufs, &relay_callbacks, dev_priv);
- if (!guc_log_relay_chan) {
- DRM_ERROR("Couldn't create relay chan for GuC logging\n");
- return -ENOMEM;
- }
-
- GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
- guc->log.relay_chan = guc_log_relay_chan;
- return 0;
-}
-
-static int guc_log_create_relay_file(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct dentry *log_dir;
- int ret;
-
- /* For now create the log file in /sys/kernel/debug/dri/0 dir */
- log_dir = dev_priv->drm.primary->debugfs_root;
-
- * If the /sys/kernel/debug/dri/0 location does not exist, then debugfs is
- * not mounted and so can't create the relay file.
- * The relay API seems to fit well with debugfs only; to make use of
- * relay there are three requirements, which can be met cleanly by a
- * debugfs file :-
- * i) Need the associated dentry pointer of the file, while opening the
- * relay channel.
- * ii) Should be able to use 'relay_file_operations' fops for the file.
- * iii) Set the 'i_private' field of file's inode to the pointer of
- * relay channel buffer.
- */
- if (!log_dir) {
- DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
- return -ENODEV;
- }
-
- ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
- if (ret) {
- DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void guc_move_to_next_buf(struct intel_guc *guc)
-{
- /* Make sure the updates made in the sub buffer are visible before
- * Consumer sees the following update to offset inside the sub buffer.
- */
- smp_wmb();
-
- /* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
-
- /* Switch to the next sub buffer */
- relay_flush(guc->log.relay_chan);
-}
-
-static void *guc_get_write_buffer(struct intel_guc *guc)
-{
- if (!guc->log.relay_chan)
- return NULL;
-
- /* Just get the base address of a new sub buffer and copy data into it
- * ourselves. NULL will be returned in no-overwrite mode, if all sub
- * buffers are full. Could have used the relay_write() to indirectly
- * copy the data, but that would have been a bit convoluted, as we need to
- * write to only certain locations inside a sub buffer which cannot be
- * done without using relay_reserve() along with relay_write(). So it's
- * better to use relay_reserve() alone.
- */
- return relay_reserve(guc->log.relay_chan, 0);
-}
-
-static bool
-guc_check_log_buf_overflow(struct intel_guc *guc,
- enum guc_log_buffer_type type, unsigned int full_cnt)
-{
- unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
- bool overflow = false;
-
- if (full_cnt != prev_full_cnt) {
- overflow = true;
-
- guc->log.prev_overflow_count[type] = full_cnt;
- guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
-
- if (full_cnt < prev_full_cnt) {
- /* buffer_full_cnt is a 4 bit counter */
- guc->log.total_overflow_count[type] += 16;
- }
- DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
- }
-
- return overflow;
-}
-
-static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
-{
- switch (type) {
- case GUC_ISR_LOG_BUFFER:
- return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
- case GUC_DPC_LOG_BUFFER:
- return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
- case GUC_CRASH_DUMP_LOG_BUFFER:
- return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
- default:
- MISSING_CASE(type);
- }
-
- return 0;
-}
-
-static void guc_read_update_log_buffer(struct intel_guc *guc)
-{
- unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
- struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
- struct guc_log_buffer_state log_buf_state_local;
- enum guc_log_buffer_type type;
- void *src_data, *dst_data;
- bool new_overflow;
-
- if (WARN_ON(!guc->log.buf_addr))
- return;
-
- /* Get the pointer to shared GuC log buffer */
- log_buf_state = src_data = guc->log.buf_addr;
-
- /* Get the pointer to local buffer to store the logs */
- log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
- /* Actual logs are present from the 2nd page */
- src_data += PAGE_SIZE;
- dst_data += PAGE_SIZE;
-
- for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
- /* Make a copy, on the stack, of the state structure inside the
- * GuC log buffer (which is mapped uncached), to avoid reading
- * from it multiple times.
- */
- memcpy(&log_buf_state_local, log_buf_state,
- sizeof(struct guc_log_buffer_state));
- buffer_size = guc_get_log_buffer_size(type);
- read_offset = log_buf_state_local.read_ptr;
- write_offset = log_buf_state_local.sampled_write_ptr;
- full_cnt = log_buf_state_local.buffer_full_cnt;
-
- /* Bookkeeping stuff */
- guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
- new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
-
- /* Update the state of shared log buffer */
- log_buf_state->read_ptr = write_offset;
- log_buf_state->flush_to_file = 0;
- log_buf_state++;
-
- if (unlikely(!log_buf_snapshot_state))
- continue;
-
- /* First copy the state structure in snapshot buffer */
- memcpy(log_buf_snapshot_state, &log_buf_state_local,
- sizeof(struct guc_log_buffer_state));
-
- /* The write pointer could have been updated by the GuC firmware
- * after sending the flush interrupt to the Host; for consistency,
- * set the write pointer in the snapshot buffer to the same value
- * as sampled_write_ptr.
- */
- log_buf_snapshot_state->write_ptr = write_offset;
- log_buf_snapshot_state++;
-
- /* Now copy the actual logs. */
- if (unlikely(new_overflow)) {
- /* copy the whole buffer in case of overflow */
- read_offset = 0;
- write_offset = buffer_size;
- } else if (unlikely((read_offset > buffer_size) ||
- (write_offset > buffer_size))) {
- DRM_ERROR("invalid log buffer state\n");
- /* copy whole buffer as offsets are unreliable */
- read_offset = 0;
- write_offset = buffer_size;
- }
-
- /* Just copy the newly written data */
- if (read_offset > write_offset) {
- i915_memcpy_from_wc(dst_data, src_data, write_offset);
- bytes_to_copy = buffer_size - read_offset;
- } else {
- bytes_to_copy = write_offset - read_offset;
- }
- i915_memcpy_from_wc(dst_data + read_offset,
- src_data + read_offset, bytes_to_copy);
-
- src_data += buffer_size;
- dst_data += buffer_size;
- }
-
- if (log_buf_snapshot_state)
- guc_move_to_next_buf(guc);
- else {
- /* Use a rate-limited message to avoid a deluge; logs might be
- * getting consumed by the user at a slow rate.
- */
- DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
- guc->log.capture_miss_count++;
- }
-}
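
The copy above treats each log region as a circular buffer: once the sampled write pointer has wrapped past the read pointer, the unread data lives in two disjoint spans, [0, write_offset) and [read_offset, buffer_size). A condensed sketch of the same two-span copy, with plain memcpy standing in for i915_memcpy_from_wc():

#include <linux/string.h>

static void copy_new_log_data(void *dst, const void *src, size_t size,
			      size_t read, size_t write)
{
	if (read > write) {
		/* wrapped: head of the buffer first, then the tail */
		memcpy(dst, src, write);			/* [0, write)   */
		memcpy(dst + read, src + read, size - read);	/* [read, size) */
	} else {
		/* linear: a single span of fresh data */
		memcpy(dst + read, src + read, write - read);
	}
}
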
-
-static void guc_capture_logs_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, guc.log.flush_work);
-
- i915_guc_capture_logs(dev_priv);
-}
-
-static void guc_log_cleanup(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- /* First disable the flush interrupt */
- gen9_disable_guc_interrupts(dev_priv);
-
- if (guc->log.flush_wq)
- destroy_workqueue(guc->log.flush_wq);
-
- guc->log.flush_wq = NULL;
-
- if (guc->log.relay_chan)
- guc_log_remove_relay_file(guc);
-
- guc->log.relay_chan = NULL;
-
- if (guc->log.buf_addr)
- i915_gem_object_unpin_map(guc->log.vma->obj);
-
- guc->log.buf_addr = NULL;
-}
-
-static int guc_log_create_extras(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- void *vaddr;
- int ret;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- /* Nothing to do */
- if (i915.guc_log_level < 0)
- return 0;
-
- if (!guc->log.buf_addr) {
- /* Create a WC (write-combining, uncached for reads) vmalloc
- * mapping of the log buffer pages, so that we can directly get
- * up-to-date data from memory.
- */
- vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
- return ret;
- }
-
- guc->log.buf_addr = vaddr;
- }
-
- if (!guc->log.relay_chan) {
- /* Create a relay channel, so that we have buffers for storing
- * the GuC firmware logs; the channel will be linked with a file
- * later on when debugfs is registered.
- */
- ret = guc_log_create_relay_channel(guc);
- if (ret)
- return ret;
- }
-
- if (!guc->log.flush_wq) {
- INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
-
- /*
- * The GuC log buffer flush work item has to do register access
- * to send the ack to GuC, and this work item, if not synced
- * before suspend, can potentially get executed after the GFX
- * device is suspended.
- * By marking the WQ as freezable, we don't have to bother with
- * flushing this work item from the suspend hooks; any pending
- * work item will either be executed before the suspend or be
- * scheduled later on resume. This way the handling of the work
- * item can be kept the same between system suspend & rpm suspend.
- */
- guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (guc->log.flush_wq == NULL) {
- DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
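
For context, a hedged sketch of the producer side that the freezable-workqueue comment above is guarding: the GuC flush interrupt handler (elsewhere in this series) defers the actual buffer capture to this ordered wq, so captures run one at a time and the wq is frozen across suspend:

#include <linux/workqueue.h>

/* illustrative only; the real call sits in the GuC irq handler */
static void guc_log_flush_interrupt_seen(struct intel_guc *guc)
{
	queue_work(guc->log.flush_wq, &guc->log.flush_work);
}
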
-
-static void guc_log_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- unsigned long offset;
- uint32_t size, flags;
-
- if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
- i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
-
- /* The first page is to save log buffer state. Allocate one
- * extra page for the others in case of overlap */
- size = (1 + GUC_LOG_DPC_PAGES + 1 +
- GUC_LOG_ISR_PAGES + 1 +
- GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
-
- vma = guc->log.vma;
- if (!vma) {
- /* We require SSE 4.1 for fast reads from the GuC log buffer and
- * it should be present on the chipsets supporting GuC based
- * submissions.
- */
- if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
- /* logging will not be enabled */
- i915.guc_log_level = -1;
- return;
- }
-
- vma = guc_allocate_vma(guc, size);
- if (IS_ERR(vma)) {
- /* logging will be off */
- i915.guc_log_level = -1;
- return;
- }
-
- guc->log.vma = vma;
-
- if (guc_log_create_extras(guc)) {
- guc_log_cleanup(guc);
- i915_vma_unpin_and_release(&guc->log.vma);
- i915.guc_log_level = -1;
- return;
- }
- }
-
- /* each allocated unit is a page */
- flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
- (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
- (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
- (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
- offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
-}
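
Worked sizing arithmetic for the allocation above, assuming 7 pages each for the ISR, DPC and crash regions (an assumption for illustration; the authoritative constants are the GUC_LOG_*_PAGES values in the fwif header). One shared state page, plus each region and its extra page:

/* (1 + (7+1) + (7+1) + (7+1)) pages = 25 pages = 100 KiB with 4K pages;
 * the same page counts, plus the GGTT page offset, are then packed into
 * the single control word handed to the firmware via guc->log.flags.
 */
static const unsigned int guc_log_size_sketch =
	(1 + (7 + 1) + (7 + 1) + (7 + 1)) << 12;
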
-
-static int guc_log_late_setup(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- if (i915.guc_log_level < 0)
- return -EINVAL;
-
- /* If log_level was set as -1 at boot time, then the setup needed
- * to handle log buffer flush interrupts would not have been done
- * yet, so do it now.
- */
- ret = guc_log_create_extras(guc);
- if (ret)
- goto err;
-
- ret = guc_log_create_relay_file(guc);
- if (ret)
- goto err;
-
- return 0;
-err:
- guc_log_cleanup(guc);
- /* logging will remain off */
- i915.guc_log_level = -1;
- return ret;
-}
static void guc_policies_init(struct guc_policies *policies)
{
@@ -1422,7 +815,7 @@ static void guc_addon_create(struct intel_guc *guc)
vma = guc->ads_vma;
if (!vma) {
- vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
+ vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
if (IS_ERR(vma))
return;
@@ -1450,7 +843,7 @@ static void guc_addon_create(struct intel_guc *guc)
guc_policies_init(policies);
ads->scheduler_policies =
- i915_ggtt_offset(vma) + sizeof(struct guc_ads);
+ guc_ggtt_offset(vma) + sizeof(struct guc_ads);
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -1484,6 +877,9 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ if (!HAS_GUC_SCHED(dev_priv))
+ return 0;
+
/* Wipe bitmap & delete client in case of reinitialisation */
bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
i915_guc_submission_disable(dev_priv);
@@ -1494,52 +890,68 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
if (guc->ctx_pool_vma)
return 0; /* already allocated */
- vma = guc_allocate_vma(guc, gemsize);
+ vma = intel_guc_allocate_vma(guc, gemsize);
if (IS_ERR(vma))
return PTR_ERR(vma);
guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
- mutex_init(&guc->action_lock);
- guc_log_create(guc);
+ intel_guc_log_create(guc);
guc_addon_create(guc);
+ guc->execbuf_client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CTX_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
+ if (!guc->execbuf_client) {
+ DRM_ERROR("Failed to create GuC client for execbuf!\n");
+ goto err;
+ }
+
return 0;
+
+err:
+ i915_guc_submission_fini(dev_priv);
+ return -ENOMEM;
+}
+
+static void guc_reset_wq(struct i915_guc_client *client)
+{
+ struct guc_process_desc *desc = client->vaddr +
+ client->proc_desc_offset;
+
+ desc->head = 0;
+ desc->tail = 0;
+
+ client->wq_tail = 0;
}
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- struct drm_i915_gem_request *request;
- struct i915_guc_client *client;
+ struct i915_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- /* client for execbuf submission */
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
- GUC_CTX_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
- if (!client) {
- DRM_ERROR("Failed to create normal GuC client!\n");
- return -ENOMEM;
- }
+ if (!client)
+ return -ENODEV;
+
+ intel_guc_sample_forcewake(guc);
- guc->execbuf_client = client;
- host2guc_sample_forcewake(guc, client);
+ guc_reset_wq(client);
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
for_each_engine(engine, dev_priv, id) {
+ struct drm_i915_gem_request *rq;
+
engine->submit_request = i915_guc_submit;
engine->schedule = NULL;
/* Replay the current set of previously submitted requests */
- list_for_each_entry(request,
- &engine->timeline->requests, link) {
+ list_for_each_entry(rq, &engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
- if (i915_sw_fence_done(&request->submit))
- i915_guc_submit(request);
+ __i915_guc_submit(rq);
}
}
@@ -1555,14 +967,18 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
/* Revert back to manual ELSP submission */
intel_execlists_enable_submission(dev_priv);
-
- guc_client_free(dev_priv, guc->execbuf_client);
- guc->execbuf_client = NULL;
}
void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ struct i915_guc_client *client;
+
+ client = fetch_and_zero(&guc->execbuf_client);
+ if (!client)
+ return;
+
+ guc_client_free(dev_priv, client);
i915_vma_unpin_and_release(&guc->ads_vma);
i915_vma_unpin_and_release(&guc->log.vma);
@@ -1574,44 +990,42 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
/**
* intel_guc_suspend() - notify GuC entering suspend state
- * @dev: drm device
+ * @dev_priv: i915 device private
*/
-int intel_guc_suspend(struct drm_device *dev)
+int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc *guc = &dev_priv->guc;
struct i915_gem_context *ctx;
u32 data[3];
- if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
gen9_disable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
- data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
+ data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
/* first page is shared data with GuC */
- data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
+ data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
- return host2guc_action(guc, data, ARRAY_SIZE(data));
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
* intel_guc_resume() - notify GuC resuming from suspend state
- * @dev: drm device
+ * @dev_priv: i915 device private
*/
-int intel_guc_resume(struct drm_device *dev)
+int intel_guc_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc *guc = &dev_priv->guc;
struct i915_gem_context *ctx;
u32 data[3];
- if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
if (i915.guc_log_level >= 0)
@@ -1619,111 +1033,12 @@ int intel_guc_resume(struct drm_device *dev)
ctx = dev_priv->kernel_context;
- data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
+ data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/* first page is shared data with GuC */
- data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
+ data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
- return host2guc_action(guc, data, ARRAY_SIZE(data));
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
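
Both hunks above follow the same host-to-GuC mailbox convention: data[0] carries an INTEL_GUC_ACTION_* opcode and the remaining slots are action-specific arguments, with intel_guc_send() doing the register writes and the reply wait. A hedged condensation of the shared pattern (names taken from the hunks; the helper itself is hypothetical):

static int guc_notify_power_state(struct intel_guc *guc,
				  struct i915_gem_context *ctx,
				  u32 action, u32 power_state)
{
	u32 data[] = {
		action,		/* e.g. INTEL_GUC_ACTION_ENTER_S_STATE */
		power_state,	/* GUC_POWER_D0 or deeper */
		/* first page of the kernel context state is shared data */
		guc_ggtt_offset(ctx->engine[RCS].state),
	};

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
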
-void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
-{
- guc_read_update_log_buffer(&dev_priv->guc);
- /* Generally the device is expected to be active at this
- * time, so get/put should be really quick.
- */
- intel_runtime_pm_get(dev_priv);
- host2guc_logbuffer_flush_complete(&dev_priv->guc);
- intel_runtime_pm_put(dev_priv);
-}
-
-void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
-{
- if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
- return;
-
- /* First disable the interrupts; they will be re-enabled afterwards */
- gen9_disable_guc_interrupts(dev_priv);
-
- /* Before initiating the forceful flush, wait for any pending/ongoing
- * flush to complete, otherwise the forceful flush may not actually
- * happen.
- */
- flush_work(&dev_priv->guc.log.flush_work);
-
- /* Ask GuC to update the log buffer state */
- host2guc_force_logbuffer_flush(&dev_priv->guc);
-
- /* GuC would have updated log buffer by now, so capture it */
- i915_guc_capture_logs(dev_priv);
-}
-
-void i915_guc_unregister(struct drm_i915_private *dev_priv)
-{
- if (!i915.enable_guc_submission)
- return;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- guc_log_cleanup(&dev_priv->guc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-void i915_guc_register(struct drm_i915_private *dev_priv)
-{
- if (!i915.enable_guc_submission)
- return;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- guc_log_late_setup(&dev_priv->guc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
-{
- union guc_log_control log_param;
- int ret;
-
- log_param.value = control_val;
-
- if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
- log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
- return -EINVAL;
-
- /* This combination doesn't make sense & won't have any effect */
- if (!log_param.logging_enabled && (i915.guc_log_level < 0))
- return 0;
-
- ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
- if (ret < 0) {
- DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
- return ret;
- }
-
- i915.guc_log_level = log_param.verbosity;
-
- /* If log_level was set as -1 at boot time, then the relay channel file
- * wouldn't have been created by now and interrupts also would not have
- * been enabled.
- */
- if (!dev_priv->guc.log.relay_chan) {
- ret = guc_log_late_setup(&dev_priv->guc);
- if (!ret)
- gen9_enable_guc_interrupts(dev_priv);
- } else if (!log_param.logging_enabled) {
- /* Once logging is disabled, GuC won't generate logs or send an
- * interrupt. But there could be some data in the log buffer
- * which is yet to be captured. So request GuC to update the log
- * buffer state and then collect the leftover logs.
- */
- i915_guc_flush_logs(dev_priv);
-
- /* As logging is disabled, update log level to reflect that */
- i915.guc_log_level = -1;
- } else {
- /* In case interrupts were disabled, enable them now */
- gen9_enable_guc_interrupts(dev_priv);
- }
-
- return ret;
-}
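
The control value decoded above packs an enable bit and a verbosity level into one u32, read through a union. A sketch of a plausible layout, inferred from the fields used in this function (the authoritative definition lives in the GuC fwif header):

union guc_log_control_sketch {
	struct {
		u32 logging_enabled:1;	/* bit 0 */
		u32 reserved:3;
		u32 verbosity:4;	/* GUC_LOG_VERBOSITY_MIN..MAX */
	};
	u32 value;
};
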
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f0880afbb878..e6ffef2f707a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1170,6 +1170,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
+
+ if (new_delay >= dev_priv->rps.max_freq_softlimit)
+ adj = 0;
/*
* For better performance, jump directly
* to RPe if we're below it.
@@ -1191,6 +1194,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
+
+ if (new_delay <= dev_priv->rps.min_freq_softlimit)
+ adj = 0;
} else { /* unknown event */
adj = 0;
}
@@ -1553,41 +1559,68 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_pipe_crc_entry *entry;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct drm_driver *driver = dev_priv->drm.driver;
+ uint32_t crcs[5];
int head, tail;
spin_lock(&pipe_crc->lock);
+ if (pipe_crc->source) {
+ if (!pipe_crc->entries) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_DEBUG_KMS("spurious interrupt\n");
+ return;
+ }
- if (!pipe_crc->entries) {
- spin_unlock(&pipe_crc->lock);
- DRM_DEBUG_KMS("spurious interrupt\n");
- return;
- }
-
- head = pipe_crc->head;
- tail = pipe_crc->tail;
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
- if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
- spin_unlock(&pipe_crc->lock);
- DRM_ERROR("CRC buffer overflowing\n");
- return;
- }
+ if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("CRC buffer overflowing\n");
+ return;
+ }
- entry = &pipe_crc->entries[head];
+ entry = &pipe_crc->entries[head];
- entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
- pipe);
- entry->crc[0] = crc0;
- entry->crc[1] = crc1;
- entry->crc[2] = crc2;
- entry->crc[3] = crc3;
- entry->crc[4] = crc4;
+ entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
+ entry->crc[0] = crc0;
+ entry->crc[1] = crc1;
+ entry->crc[2] = crc2;
+ entry->crc[3] = crc3;
+ entry->crc[4] = crc4;
- head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- pipe_crc->head = head;
+ head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ pipe_crc->head = head;
- spin_unlock(&pipe_crc->lock);
+ spin_unlock(&pipe_crc->lock);
- wake_up_interruptible(&pipe_crc->wq);
+ wake_up_interruptible(&pipe_crc->wq);
+ } else {
+ /*
+ * For some not yet identified reason, the first CRC is
+ * bonkers. So let's just wait for the next vblank and read
+ * out the buggy result.
+ *
+ * On CHV sometimes the second CRC is bonkers as well, so
+ * don't trust that one either.
+ */
+ if (pipe_crc->skipped == 0 ||
+ (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
+ pipe_crc->skipped++;
+ spin_unlock(&pipe_crc->lock);
+ return;
+ }
+ spin_unlock(&pipe_crc->lock);
+ crcs[0] = crc0;
+ crcs[1] = crc1;
+ crcs[2] = crc2;
+ crcs[3] = crc3;
+ crcs[4] = crc4;
+ drm_crtc_add_crc_entry(&crtc->base, true,
+ drm_accurate_vblank_count(&crtc->base),
+ crcs);
+ }
}
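
The entry ring above uses the stock linux/circ_buf.h helpers with a power-of-two capacity. A worked example, assuming INTEL_PIPE_CRC_ENTRIES_NR == 128:

#include <linux/circ_buf.h>

/* CIRC_SPACE(head, tail, size) is ((tail) - ((head) + 1)) & ((size) - 1):
 *   head = 2, tail = 3:  (3 - 3) & 127 = 0  -> ring full, entry dropped
 *   head = 2, tail = 10: (10 - 3) & 127 = 7 -> room for 7 more entries
 */
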
#else
static inline void
@@ -1683,8 +1716,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
u32 msg, flush;
msg = I915_READ(SOFT_SCRATCH(15));
- flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
- GUC2HOST_MSG_FLUSH_LOG_BUFFER);
+ flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
+ INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
if (flush) {
/* Clear the message bits that are handled */
I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
@@ -2444,7 +2477,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
found = true;
}
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
bxt_hpd_irq_handler(dev_priv, tmp_mask,
@@ -2460,7 +2493,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
}
- if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+ if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev_priv);
found = true;
}
@@ -2712,12 +2745,13 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
* i915_handle_error - handle a gpu error
* @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
+ * @fmt: Error message format string
+ *
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
- * @fmt: Error message format string
*/
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
@@ -3105,19 +3139,16 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, hotplug, enabled_irqs;
-
- hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ u32 hotplug;
/* Enable digital hotplug on the PCH */
hotplug = I915_READ(PCH_PORT_HOTPLUG);
- hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
+ hotplug |= PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE |
+ PORTD_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
hotplug = I915_READ(PCH_PORT_HOTPLUG2);
@@ -3125,6 +3156,18 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ spt_hpd_detection_setup(dev_priv);
+}
+
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, hotplug, enabled_irqs;
@@ -3159,18 +3202,15 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_irq_setup(dev_priv);
}
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
+ u32 enabled_irqs)
{
- u32 hotplug_irqs, hotplug, enabled_irqs;
-
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
- hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
-
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ u32 hotplug;
hotplug = I915_READ(PCH_PORT_HOTPLUG);
- hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
- PORTA_HOTPLUG_ENABLE;
+ hotplug |= PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE;
DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
hotplug, enabled_irqs);
@@ -3180,7 +3220,6 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
* For BXT the invert bit has to be set based on the AOB design
* for HPD detection logic; update it based on VBT fields.
*/
-
if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
hotplug |= BXT_DDIA_HPD_INVERT;
@@ -3194,6 +3233,23 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
+static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
+}
+
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
+ hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
+
+ bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+
+ __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
+}
+
static void ibx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3209,6 +3265,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
gen5_assert_iir_is_zero(dev_priv, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
+
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
+ HAS_PCH_LPT(dev_priv))
+ ; /* TODO: Enable HPD detection on older PCH platforms too */
+ else
+ spt_hpd_detection_setup(dev_priv);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
@@ -3391,7 +3453,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
} else {
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
@@ -3402,7 +3464,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN8_PIPE_FIFO_UNDERRUN;
de_port_enables = de_port_masked;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
@@ -3420,6 +3482,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+
+ if (IS_GEN9_LP(dev_priv))
+ bxt_hpd_detection_setup(dev_priv);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4227,7 +4292,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
new file mode 100644
index 000000000000..4ddf756add31
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -0,0 +1,752 @@
+/*
+ * Autogenerated file, DO NOT EDIT manually!
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_hsw.h"
+
+enum metric_set_id {
+ METRIC_SET_ID_RENDER_BASIC = 1,
+ METRIC_SET_ID_COMPUTE_BASIC,
+ METRIC_SET_ID_COMPUTE_EXTENDED,
+ METRIC_SET_ID_MEMORY_READS,
+ METRIC_SET_ID_MEMORY_WRITES,
+ METRIC_SET_ID_SAMPLER_BALANCE,
+};
+
+int i915_oa_n_builtin_metric_sets_hsw = 6;
+
+static const struct i915_oa_reg b_counter_config_render_basic[] = {
+ { _MMIO(0x2724), 0x00800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2714), 0x00800000 },
+ { _MMIO(0x2710), 0x00000000 },
+};
+
+static const struct i915_oa_reg mux_config_render_basic[] = {
+ { _MMIO(0x253a4), 0x01600000 },
+ { _MMIO(0x25440), 0x00100000 },
+ { _MMIO(0x25128), 0x00000000 },
+ { _MMIO(0x2691c), 0x00000800 },
+ { _MMIO(0x26aa0), 0x01500000 },
+ { _MMIO(0x26b9c), 0x00006000 },
+ { _MMIO(0x2791c), 0x00000800 },
+ { _MMIO(0x27aa0), 0x01500000 },
+ { _MMIO(0x27b9c), 0x00006000 },
+ { _MMIO(0x2641c), 0x00000400 },
+ { _MMIO(0x25380), 0x00000010 },
+ { _MMIO(0x2538c), 0x00000000 },
+ { _MMIO(0x25384), 0x0800aaaa },
+ { _MMIO(0x25400), 0x00000004 },
+ { _MMIO(0x2540c), 0x06029000 },
+ { _MMIO(0x25410), 0x00000002 },
+ { _MMIO(0x25404), 0x5c30ffff },
+ { _MMIO(0x25100), 0x00000016 },
+ { _MMIO(0x25110), 0x00000400 },
+ { _MMIO(0x25104), 0x00000000 },
+ { _MMIO(0x26804), 0x00001211 },
+ { _MMIO(0x26884), 0x00000100 },
+ { _MMIO(0x26900), 0x00000002 },
+ { _MMIO(0x26908), 0x00700000 },
+ { _MMIO(0x26904), 0x00000000 },
+ { _MMIO(0x26984), 0x00001022 },
+ { _MMIO(0x26a04), 0x00000011 },
+ { _MMIO(0x26a80), 0x00000006 },
+ { _MMIO(0x26a88), 0x00000c02 },
+ { _MMIO(0x26a84), 0x00000000 },
+ { _MMIO(0x26b04), 0x00001000 },
+ { _MMIO(0x26b80), 0x00000002 },
+ { _MMIO(0x26b8c), 0x00000007 },
+ { _MMIO(0x26b84), 0x00000000 },
+ { _MMIO(0x27804), 0x00004844 },
+ { _MMIO(0x27884), 0x00000400 },
+ { _MMIO(0x27900), 0x00000002 },
+ { _MMIO(0x27908), 0x0e000000 },
+ { _MMIO(0x27904), 0x00000000 },
+ { _MMIO(0x27984), 0x00004088 },
+ { _MMIO(0x27a04), 0x00000044 },
+ { _MMIO(0x27a80), 0x00000006 },
+ { _MMIO(0x27a88), 0x00018040 },
+ { _MMIO(0x27a84), 0x00000000 },
+ { _MMIO(0x27b04), 0x00004000 },
+ { _MMIO(0x27b80), 0x00000002 },
+ { _MMIO(0x27b8c), 0x000000e0 },
+ { _MMIO(0x27b84), 0x00000000 },
+ { _MMIO(0x26104), 0x00002222 },
+ { _MMIO(0x26184), 0x0c006666 },
+ { _MMIO(0x26284), 0x04000000 },
+ { _MMIO(0x26304), 0x04000000 },
+ { _MMIO(0x26400), 0x00000002 },
+ { _MMIO(0x26410), 0x000000a0 },
+ { _MMIO(0x26404), 0x00000000 },
+ { _MMIO(0x25420), 0x04108020 },
+ { _MMIO(0x25424), 0x1284a420 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00042049 },
+};
+
+static const struct i915_oa_reg *
+get_render_basic_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_render_basic);
+ return mux_config_render_basic;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_basic[] = {
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2714), 0x00800000 },
+ { _MMIO(0x2718), 0xaaaaaaaa },
+ { _MMIO(0x271c), 0xaaaaaaaa },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2724), 0x00800000 },
+ { _MMIO(0x2728), 0xaaaaaaaa },
+ { _MMIO(0x272c), 0xaaaaaaaa },
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2744), 0x00000000 },
+ { _MMIO(0x2748), 0x00000000 },
+ { _MMIO(0x274c), 0x00000000 },
+ { _MMIO(0x2750), 0x00000000 },
+ { _MMIO(0x2754), 0x00000000 },
+ { _MMIO(0x2758), 0x00000000 },
+ { _MMIO(0x275c), 0x00000000 },
+ { _MMIO(0x236c), 0x00000000 },
+};
+
+static const struct i915_oa_reg mux_config_compute_basic[] = {
+ { _MMIO(0x253a4), 0x00000000 },
+ { _MMIO(0x2681c), 0x01f00800 },
+ { _MMIO(0x26820), 0x00001000 },
+ { _MMIO(0x2781c), 0x01f00800 },
+ { _MMIO(0x26520), 0x00000007 },
+ { _MMIO(0x265a0), 0x00000007 },
+ { _MMIO(0x25380), 0x00000010 },
+ { _MMIO(0x2538c), 0x00300000 },
+ { _MMIO(0x25384), 0xaa8aaaaa },
+ { _MMIO(0x25404), 0xffffffff },
+ { _MMIO(0x26800), 0x00004202 },
+ { _MMIO(0x26808), 0x00605817 },
+ { _MMIO(0x2680c), 0x10001005 },
+ { _MMIO(0x26804), 0x00000000 },
+ { _MMIO(0x27800), 0x00000102 },
+ { _MMIO(0x27808), 0x0c0701e0 },
+ { _MMIO(0x2780c), 0x000200a0 },
+ { _MMIO(0x27804), 0x00000000 },
+ { _MMIO(0x26484), 0x44000000 },
+ { _MMIO(0x26704), 0x44000000 },
+ { _MMIO(0x26500), 0x00000006 },
+ { _MMIO(0x26510), 0x00000001 },
+ { _MMIO(0x26504), 0x88000000 },
+ { _MMIO(0x26580), 0x00000006 },
+ { _MMIO(0x26590), 0x00000020 },
+ { _MMIO(0x26584), 0x00000000 },
+ { _MMIO(0x26104), 0x55822222 },
+ { _MMIO(0x26184), 0xaa866666 },
+ { _MMIO(0x25420), 0x08320c83 },
+ { _MMIO(0x25424), 0x06820c83 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00000c03 },
+};
+
+static const struct i915_oa_reg *
+get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_compute_basic);
+ return mux_config_compute_basic;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_extended[] = {
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2770), 0x0007fe2a },
+ { _MMIO(0x2774), 0x0000ff00 },
+ { _MMIO(0x2778), 0x0007fe6a },
+ { _MMIO(0x277c), 0x0000ff00 },
+ { _MMIO(0x2780), 0x0007fe92 },
+ { _MMIO(0x2784), 0x0000ff00 },
+ { _MMIO(0x2788), 0x0007fea2 },
+ { _MMIO(0x278c), 0x0000ff00 },
+ { _MMIO(0x2790), 0x0007fe32 },
+ { _MMIO(0x2794), 0x0000ff00 },
+ { _MMIO(0x2798), 0x0007fe9a },
+ { _MMIO(0x279c), 0x0000ff00 },
+ { _MMIO(0x27a0), 0x0007ff23 },
+ { _MMIO(0x27a4), 0x0000ff00 },
+ { _MMIO(0x27a8), 0x0007fff3 },
+ { _MMIO(0x27ac), 0x0000fffe },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended[] = {
+ { _MMIO(0x2681c), 0x3eb00800 },
+ { _MMIO(0x26820), 0x00900000 },
+ { _MMIO(0x25384), 0x02aaaaaa },
+ { _MMIO(0x25404), 0x03ffffff },
+ { _MMIO(0x26800), 0x00142284 },
+ { _MMIO(0x26808), 0x0e629062 },
+ { _MMIO(0x2680c), 0x3f6f55cb },
+ { _MMIO(0x26810), 0x00000014 },
+ { _MMIO(0x26804), 0x00000000 },
+ { _MMIO(0x26104), 0x02aaaaaa },
+ { _MMIO(0x26184), 0x02aaaaaa },
+ { _MMIO(0x25420), 0x00000000 },
+ { _MMIO(0x25424), 0x00000000 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00000000 },
+};
+
+static const struct i915_oa_reg *
+get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_compute_extended);
+ return mux_config_compute_extended;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_reads[] = {
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x274c), 0x76543298 },
+ { _MMIO(0x2748), 0x98989898 },
+ { _MMIO(0x2744), 0x000000e4 },
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x275c), 0x98a98a98 },
+ { _MMIO(0x2758), 0x88888888 },
+ { _MMIO(0x2754), 0x000c5500 },
+ { _MMIO(0x2750), 0x00000000 },
+ { _MMIO(0x2770), 0x0007f81a },
+ { _MMIO(0x2774), 0x0000fc00 },
+ { _MMIO(0x2778), 0x0007f82a },
+ { _MMIO(0x277c), 0x0000fc00 },
+ { _MMIO(0x2780), 0x0007f872 },
+ { _MMIO(0x2784), 0x0000fc00 },
+ { _MMIO(0x2788), 0x0007f8ba },
+ { _MMIO(0x278c), 0x0000fc00 },
+ { _MMIO(0x2790), 0x0007f87a },
+ { _MMIO(0x2794), 0x0000fc00 },
+ { _MMIO(0x2798), 0x0007f8ea },
+ { _MMIO(0x279c), 0x0000fc00 },
+ { _MMIO(0x27a0), 0x0007f8e2 },
+ { _MMIO(0x27a4), 0x0000fc00 },
+ { _MMIO(0x27a8), 0x0007f8f2 },
+ { _MMIO(0x27ac), 0x0000fc00 },
+};
+
+static const struct i915_oa_reg mux_config_memory_reads[] = {
+ { _MMIO(0x253a4), 0x34300000 },
+ { _MMIO(0x25440), 0x2d800000 },
+ { _MMIO(0x25444), 0x00000008 },
+ { _MMIO(0x25128), 0x0e600000 },
+ { _MMIO(0x25380), 0x00000450 },
+ { _MMIO(0x25390), 0x00052c43 },
+ { _MMIO(0x25384), 0x00000000 },
+ { _MMIO(0x25400), 0x00006144 },
+ { _MMIO(0x25408), 0x0a418820 },
+ { _MMIO(0x2540c), 0x000820e6 },
+ { _MMIO(0x25404), 0xff500000 },
+ { _MMIO(0x25100), 0x000005d6 },
+ { _MMIO(0x2510c), 0x0ef00000 },
+ { _MMIO(0x25104), 0x00000000 },
+ { _MMIO(0x25420), 0x02108421 },
+ { _MMIO(0x25424), 0x00008421 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00000000 },
+};
+
+static const struct i915_oa_reg *
+get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_memory_reads);
+ return mux_config_memory_reads;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_writes[] = {
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x274c), 0x76543298 },
+ { _MMIO(0x2748), 0x98989898 },
+ { _MMIO(0x2744), 0x000000e4 },
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x275c), 0xbabababa },
+ { _MMIO(0x2758), 0x88888888 },
+ { _MMIO(0x2754), 0x000c5500 },
+ { _MMIO(0x2750), 0x00000000 },
+ { _MMIO(0x2770), 0x0007f81a },
+ { _MMIO(0x2774), 0x0000fc00 },
+ { _MMIO(0x2778), 0x0007f82a },
+ { _MMIO(0x277c), 0x0000fc00 },
+ { _MMIO(0x2780), 0x0007f822 },
+ { _MMIO(0x2784), 0x0000fc00 },
+ { _MMIO(0x2788), 0x0007f8ba },
+ { _MMIO(0x278c), 0x0000fc00 },
+ { _MMIO(0x2790), 0x0007f87a },
+ { _MMIO(0x2794), 0x0000fc00 },
+ { _MMIO(0x2798), 0x0007f8ea },
+ { _MMIO(0x279c), 0x0000fc00 },
+ { _MMIO(0x27a0), 0x0007f8e2 },
+ { _MMIO(0x27a4), 0x0000fc00 },
+ { _MMIO(0x27a8), 0x0007f8f2 },
+ { _MMIO(0x27ac), 0x0000fc00 },
+};
+
+static const struct i915_oa_reg mux_config_memory_writes[] = {
+ { _MMIO(0x253a4), 0x34300000 },
+ { _MMIO(0x25440), 0x01500000 },
+ { _MMIO(0x25444), 0x00000120 },
+ { _MMIO(0x25128), 0x0c200000 },
+ { _MMIO(0x25380), 0x00000450 },
+ { _MMIO(0x25390), 0x00052c43 },
+ { _MMIO(0x25384), 0x00000000 },
+ { _MMIO(0x25400), 0x00007184 },
+ { _MMIO(0x25408), 0x0a418820 },
+ { _MMIO(0x2540c), 0x000820e6 },
+ { _MMIO(0x25404), 0xff500000 },
+ { _MMIO(0x25100), 0x000005d6 },
+ { _MMIO(0x2510c), 0x1e700000 },
+ { _MMIO(0x25104), 0x00000000 },
+ { _MMIO(0x25420), 0x02108421 },
+ { _MMIO(0x25424), 0x00008421 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00000000 },
+};
+
+static const struct i915_oa_reg *
+get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_memory_writes);
+ return mux_config_memory_writes;
+}
+
+static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2744), 0x00800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2714), 0x00800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg mux_config_sampler_balance[] = {
+ { _MMIO(0x2eb9c), 0x01906400 },
+ { _MMIO(0x2fb9c), 0x01906400 },
+ { _MMIO(0x253a4), 0x00000000 },
+ { _MMIO(0x26b9c), 0x01906400 },
+ { _MMIO(0x27b9c), 0x01906400 },
+ { _MMIO(0x27104), 0x00a00000 },
+ { _MMIO(0x27184), 0x00a50000 },
+ { _MMIO(0x2e804), 0x00500000 },
+ { _MMIO(0x2e984), 0x00500000 },
+ { _MMIO(0x2eb04), 0x00500000 },
+ { _MMIO(0x2eb80), 0x00000084 },
+ { _MMIO(0x2eb8c), 0x14200000 },
+ { _MMIO(0x2eb84), 0x00000000 },
+ { _MMIO(0x2f804), 0x00050000 },
+ { _MMIO(0x2f984), 0x00050000 },
+ { _MMIO(0x2fb04), 0x00050000 },
+ { _MMIO(0x2fb80), 0x00000084 },
+ { _MMIO(0x2fb8c), 0x00050800 },
+ { _MMIO(0x2fb84), 0x00000000 },
+ { _MMIO(0x25380), 0x00000010 },
+ { _MMIO(0x2538c), 0x000000c0 },
+ { _MMIO(0x25384), 0xaa550000 },
+ { _MMIO(0x25404), 0xffffc000 },
+ { _MMIO(0x26804), 0x50000000 },
+ { _MMIO(0x26984), 0x50000000 },
+ { _MMIO(0x26b04), 0x50000000 },
+ { _MMIO(0x26b80), 0x00000084 },
+ { _MMIO(0x26b90), 0x00050800 },
+ { _MMIO(0x26b84), 0x00000000 },
+ { _MMIO(0x27804), 0x05000000 },
+ { _MMIO(0x27984), 0x05000000 },
+ { _MMIO(0x27b04), 0x05000000 },
+ { _MMIO(0x27b80), 0x00000084 },
+ { _MMIO(0x27b90), 0x00000142 },
+ { _MMIO(0x27b84), 0x00000000 },
+ { _MMIO(0x26104), 0xa0000000 },
+ { _MMIO(0x26184), 0xa5000000 },
+ { _MMIO(0x25424), 0x00008620 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x0004a54a },
+};
+
+static const struct i915_oa_reg *
+get_sampler_balance_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_sampler_balance);
+ return mux_config_sampler_balance;
+}
+
+int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
+{
+ dev_priv->perf.oa.mux_regs = NULL;
+ dev_priv->perf.oa.mux_regs_len = 0;
+ dev_priv->perf.oa.b_counter_regs = NULL;
+ dev_priv->perf.oa.b_counter_regs_len = 0;
+
+ switch (dev_priv->perf.oa.metrics_set) {
+ case METRIC_SET_ID_RENDER_BASIC:
+ dev_priv->perf.oa.mux_regs =
+ get_render_basic_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_render_basic;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_render_basic);
+
+ return 0;
+ case METRIC_SET_ID_COMPUTE_BASIC:
+ dev_priv->perf.oa.mux_regs =
+ get_compute_basic_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_compute_basic;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_compute_basic);
+
+ return 0;
+ case METRIC_SET_ID_COMPUTE_EXTENDED:
+ dev_priv->perf.oa.mux_regs =
+ get_compute_extended_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_compute_extended;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_compute_extended);
+
+ return 0;
+ case METRIC_SET_ID_MEMORY_READS:
+ dev_priv->perf.oa.mux_regs =
+ get_memory_reads_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_memory_reads;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_memory_reads);
+
+ return 0;
+ case METRIC_SET_ID_MEMORY_WRITES:
+ dev_priv->perf.oa.mux_regs =
+ get_memory_writes_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_memory_writes;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_memory_writes);
+
+ return 0;
+ case METRIC_SET_ID_SAMPLER_BALANCE:
+ dev_priv->perf.oa.mux_regs =
+ get_sampler_balance_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_sampler_balance;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_sampler_balance);
+
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
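
A hedged userspace sketch of how these metric sets are meant to be found: each attribute group below publishes its numeric id under a UUID-named directory beneath the perf metrics kobject (the exact sysfs path is an assumption here; adjust the card index as needed). The id read back is what selects the set when opening an i915 perf stream:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/metrics/"
			"403d8832-1a27-4aa6-a64e-f5389ce7b212/id", "r");
	int id;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &id) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("RENDER_BASIC metric set id: %d\n", id);	/* expect 1 */
	return 0;
}
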
+
+static ssize_t
+show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
+}
+
+static struct device_attribute dev_attr_render_basic_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_render_basic_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_render_basic[] = {
+ &dev_attr_render_basic_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_render_basic = {
+ .name = "403d8832-1a27-4aa6-a64e-f5389ce7b212",
+ .attrs = attrs_render_basic,
+};
+
+static ssize_t
+show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
+}
+
+static struct device_attribute dev_attr_compute_basic_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_compute_basic_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_compute_basic[] = {
+ &dev_attr_compute_basic_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_compute_basic = {
+ .name = "39ad14bc-2380-45c4-91eb-fbcb3aa7ae7b",
+ .attrs = attrs_compute_basic,
+};
+
+static ssize_t
+show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
+}
+
+static struct device_attribute dev_attr_compute_extended_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_compute_extended_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_compute_extended[] = {
+ &dev_attr_compute_extended_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_compute_extended = {
+ .name = "3865be28-6982-49fe-9494-e4d1b4795413",
+ .attrs = attrs_compute_extended,
+};
+
+static ssize_t
+show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
+}
+
+static struct device_attribute dev_attr_memory_reads_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_memory_reads_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_memory_reads[] = {
+ &dev_attr_memory_reads_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_memory_reads = {
+ .name = "bb5ed49b-2497-4095-94f6-26ba294db88a",
+ .attrs = attrs_memory_reads,
+};
+
+static ssize_t
+show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
+}
+
+static struct device_attribute dev_attr_memory_writes_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_memory_writes_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_memory_writes[] = {
+ &dev_attr_memory_writes_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_memory_writes = {
+ .name = "3358d639-9b5f-45ab-976d-9b08cbfc6240",
+ .attrs = attrs_memory_writes,
+};
+
+static ssize_t
+show_sampler_balance_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_BALANCE);
+}
+
+static struct device_attribute dev_attr_sampler_balance_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_sampler_balance_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_sampler_balance[] = {
+ &dev_attr_sampler_balance_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_sampler_balance = {
+ .name = "bc274488-b4b6-40c7-90da-b77d7ad16189",
+ .attrs = attrs_sampler_balance,
+};
+
+int
+i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
+{
+ int mux_len;
+ int ret = 0;
+
+ if (get_render_basic_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+ if (ret)
+ goto error_render_basic;
+ }
+ if (get_compute_basic_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+ if (ret)
+ goto error_compute_basic;
+ }
+ if (get_compute_extended_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+ if (ret)
+ goto error_compute_extended;
+ }
+ if (get_memory_reads_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+ if (ret)
+ goto error_memory_reads;
+ }
+ if (get_memory_writes_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+ if (ret)
+ goto error_memory_writes;
+ }
+ if (get_sampler_balance_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
+ if (ret)
+ goto error_sampler_balance;
+ }
+
+ return 0;
+
+error_sampler_balance:
+ if (get_memory_writes_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+error_memory_writes:
+ if (get_memory_reads_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+error_memory_reads:
+ if (get_compute_extended_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+error_compute_extended:
+ if (get_compute_basic_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+error_compute_basic:
+ if (get_render_basic_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+error_render_basic:
+ return ret;
+}
+
+void
+i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv)
+{
+ int mux_len;
+
+ if (get_render_basic_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+ if (get_compute_basic_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+ if (get_compute_extended_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+ if (get_memory_reads_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+ if (get_memory_writes_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+ if (get_sampler_balance_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
+}
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
new file mode 100644
index 000000000000..429a229b5158
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -0,0 +1,38 @@
+/*
+ * Autogenerated file, DO NOT EDIT manually!
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_HSW_H__
+#define __I915_OA_HSW_H__
+
+extern int i915_oa_n_builtin_metric_sets_hsw;
+
+extern int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv);
+
+extern int i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv);
+
+extern void i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d46ffe7086bc..0e280fbd52f1 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -50,7 +50,7 @@ struct i915_params i915 __read_mostly = {
.error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
- .enable_cmd_parser = 1,
+ .enable_cmd_parser = true,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
@@ -188,9 +188,9 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0400);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
-module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400);
MODULE_PARM_DESC(enable_cmd_parser,
- "Enable command parsing (1=enabled [default], 0=disabled)");
+ "Enable command parsing (true=enabled [default], false=disabled)");
module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 817ad959941e..8e433de04679 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -44,7 +44,6 @@ struct i915_params {
int disable_power_well;
int enable_ips;
int invert_brightness;
- int enable_cmd_parser;
int enable_guc_loading;
int enable_guc_submission;
int guc_log_level;
@@ -53,6 +52,7 @@ struct i915_params {
int edp_vswing;
unsigned int inject_load_failure;
/* leave bools at the end to not create holes */
+ bool enable_cmd_parser;
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index fce8e198bc76..ecb487b5356f 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -54,6 +54,7 @@
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+/* Keep in gen based order, and chronological order within a gen */
#define GEN2_FEATURES \
.gen = 2, .num_pipes = 1, \
.has_overlay = 1, .overlay_needs_physical = 1, \
@@ -65,17 +66,19 @@
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
+ .platform = INTEL_I830,
.is_mobile = 1, .cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
-static const struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_i845g_info = {
GEN2_FEATURES,
+ .platform = INTEL_I845G,
};
static const struct intel_device_info intel_i85x_info = {
GEN2_FEATURES,
- .is_i85x = 1, .is_mobile = 1,
+ .platform = INTEL_I85X, .is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
.cursor_needs_physical = 1,
.has_fbc = 1,
@@ -83,6 +86,7 @@ static const struct intel_device_info intel_i85x_info = {
static const struct intel_device_info intel_i865g_info = {
GEN2_FEATURES,
+ .platform = INTEL_I865G,
};
#define GEN3_FEATURES \
@@ -94,12 +98,14 @@ static const struct intel_device_info intel_i865g_info = {
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
- .is_i915g = 1, .cursor_needs_physical = 1,
+ .platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
};
+
static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
+ .platform = INTEL_I915GM,
.is_mobile = 1,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -107,15 +113,18 @@ static const struct intel_device_info intel_i915gm_info = {
.has_fbc = 1,
.hws_needs_physical = 1,
};
+
static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
+ .platform = INTEL_I945G,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
};
+
static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
- .is_i945gm = 1, .is_mobile = 1,
+ .platform = INTEL_I945GM, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
@@ -123,6 +132,20 @@ static const struct intel_device_info intel_i945gm_info = {
.hws_needs_physical = 1,
};
+static const struct intel_device_info intel_g33_info = {
+ GEN3_FEATURES,
+ .platform = INTEL_G33,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+ GEN3_FEATURES,
+ .platform = INTEL_PINEVIEW, .is_mobile = 1,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
#define GEN4_FEATURES \
.gen = 4, .num_pipes = 2, \
.has_hotplug = 1, \
@@ -133,50 +156,36 @@ static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
- .is_broadwater = 1,
+ .platform = INTEL_I965G,
.has_overlay = 1,
.hws_needs_physical = 1,
};
static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
- .is_crestline = 1,
+ .platform = INTEL_I965GM,
.is_mobile = 1, .has_fbc = 1,
.has_overlay = 1,
.supports_tv = 1,
.hws_needs_physical = 1,
};
-static const struct intel_device_info intel_g33_info = {
- GEN3_FEATURES,
- .is_g33 = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
-};
-
static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
- .is_g4x = 1,
+ .platform = INTEL_G45,
.has_pipe_cxsr = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
- .is_g4x = 1,
+ .platform = INTEL_GM45,
.is_mobile = 1, .has_fbc = 1,
.has_pipe_cxsr = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
-static const struct intel_device_info intel_pineview_info = {
- GEN3_FEATURES,
- .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
-};
-
#define GEN5_FEATURES \
.gen = 5, .num_pipes = 2, \
.has_hotplug = 1, \
@@ -187,10 +196,12 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
+ .platform = INTEL_IRONLAKE,
};
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
+ .platform = INTEL_IRONLAKE,
.is_mobile = 1,
};
@@ -204,15 +215,18 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_rc6p = 1, \
.has_gmbus_irq = 1, \
.has_hw_contexts = 1, \
+ .has_aliasing_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_sandybridge_d_info = {
GEN6_FEATURES,
+ .platform = INTEL_SANDYBRIDGE,
};
static const struct intel_device_info intel_sandybridge_m_info = {
GEN6_FEATURES,
+ .platform = INTEL_SANDYBRIDGE,
.is_mobile = 1,
};
@@ -226,46 +240,49 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_rc6p = 1, \
.has_gmbus_irq = 1, \
.has_hw_contexts = 1, \
+ .has_aliasing_ppgtt = 1, \
+ .has_full_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
- .is_ivybridge = 1,
+ .platform = INTEL_IVYBRIDGE,
.has_l3_dpf = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
- .is_ivybridge = 1,
+ .platform = INTEL_IVYBRIDGE,
.is_mobile = 1,
.has_l3_dpf = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
- .is_ivybridge = 1,
+ .platform = INTEL_IVYBRIDGE,
.num_pipes = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
-#define VLV_FEATURES \
- .gen = 7, .num_pipes = 2, \
- .has_psr = 1, \
- .has_runtime_pm = 1, \
- .has_rc6 = 1, \
- .has_gmbus_irq = 1, \
- .has_hw_contexts = 1, \
- .has_gmch_display = 1, \
- .has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
- .display_mmio_offset = VLV_DISPLAY_BASE, \
- GEN_DEFAULT_PIPEOFFSETS, \
- CURSOR_OFFSETS
-
static const struct intel_device_info intel_valleyview_info = {
- VLV_FEATURES,
- .is_valleyview = 1,
+ .platform = INTEL_VALLEYVIEW,
+ .gen = 7,
+ .is_lp = 1,
+ .num_pipes = 2,
+ .has_psr = 1,
+ .has_runtime_pm = 1,
+ .has_rc6 = 1,
+ .has_gmbus_irq = 1,
+ .has_hw_contexts = 1,
+ .has_gmch_display = 1,
+ .has_hotplug = 1,
+ .has_aliasing_ppgtt = 1,
+ .has_full_ppgtt = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS
};
#define HSW_FEATURES \
@@ -281,7 +298,7 @@ static const struct intel_device_info intel_valleyview_info = {
static const struct intel_device_info intel_haswell_info = {
HSW_FEATURES,
- .is_haswell = 1,
+ .platform = INTEL_HASWELL,
.has_l3_dpf = 1,
};
@@ -289,26 +306,28 @@ static const struct intel_device_info intel_haswell_info = {
HSW_FEATURES, \
BDW_COLORS, \
.has_logical_ring_contexts = 1, \
+ .has_full_48bit_ppgtt = 1, \
.has_64bit_reloc = 1
static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
.gen = 8,
- .is_broadwell = 1,
+ .platform = INTEL_BROADWELL,
};
static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_FEATURES,
.gen = 8,
- .is_broadwell = 1,
+ .platform = INTEL_BROADWELL,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_cherryview_info = {
.gen = 8, .num_pipes = 3,
.has_hotplug = 1,
+ .is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .is_cherryview = 1,
+ .platform = INTEL_CHERRYVIEW,
.has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
@@ -318,6 +337,8 @@ static const struct intel_device_info intel_cherryview_info = {
.has_hw_contexts = 1,
.has_logical_ring_contexts = 1,
.has_gmch_display = 1,
+ .has_aliasing_ppgtt = 1,
+ .has_full_ppgtt = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
@@ -326,7 +347,7 @@ static const struct intel_device_info intel_cherryview_info = {
static const struct intel_device_info intel_skylake_info = {
BDW_FEATURES,
- .is_skylake = 1,
+ .platform = INTEL_SKYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
@@ -335,7 +356,7 @@ static const struct intel_device_info intel_skylake_info = {
static const struct intel_device_info intel_skylake_gt3_info = {
BDW_FEATURES,
- .is_skylake = 1,
+ .platform = INTEL_SKYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
@@ -343,36 +364,50 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
+#define GEN9_LP_FEATURES \
+ .gen = 9, \
+ .is_lp = 1, \
+ .has_hotplug = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .num_pipes = 3, \
+ .has_64bit_reloc = 1, \
+ .has_ddi = 1, \
+ .has_fpga_dbg = 1, \
+ .has_fbc = 1, \
+ .has_runtime_pm = 1, \
+ .has_pooled_eu = 0, \
+ .has_csr = 1, \
+ .has_resource_streamer = 1, \
+ .has_rc6 = 1, \
+ .has_dp_mst = 1, \
+ .has_gmbus_irq = 1, \
+ .has_hw_contexts = 1, \
+ .has_logical_ring_contexts = 1, \
+ .has_guc = 1, \
+ .has_decoupled_mmio = 1, \
+ .has_aliasing_ppgtt = 1, \
+ .has_full_ppgtt = 1, \
+ .has_full_48bit_ppgtt = 1, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ IVB_CURSOR_OFFSETS, \
+ BDW_COLORS
+
static const struct intel_device_info intel_broxton_info = {
- .is_broxton = 1,
- .gen = 9,
- .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .num_pipes = 3,
- .has_64bit_reloc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- .has_runtime_pm = 1,
- .has_pooled_eu = 0,
- .has_csr = 1,
- .has_resource_streamer = 1,
- .has_rc6 = 1,
- .has_dp_mst = 1,
- .has_gmbus_irq = 1,
- .has_hw_contexts = 1,
- .has_logical_ring_contexts = 1,
- .has_guc = 1,
- .has_decoupled_mmio = 1,
+ GEN9_LP_FEATURES,
+ .platform = INTEL_BROXTON,
.ddb_size = 512,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
- BDW_COLORS,
+};
+
+static const struct intel_device_info intel_geminilake_info = {
+ GEN9_LP_FEATURES,
+ .platform = INTEL_GEMINILAKE,
+ .is_alpha_support = 1,
+ .ddb_size = 1024,
};
static const struct intel_device_info intel_kabylake_info = {
BDW_FEATURES,
- .is_kabylake = 1,
+ .platform = INTEL_KABYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
@@ -381,7 +416,7 @@ static const struct intel_device_info intel_kabylake_info = {
static const struct intel_device_info intel_kabylake_gt3_info = {
BDW_FEATURES,
- .is_kabylake = 1,
+ .platform = INTEL_KABYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
@@ -397,7 +432,7 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
*/
static const struct pci_device_id pciidlist[] = {
INTEL_I830_IDS(&intel_i830_info),
- INTEL_I845G_IDS(&intel_845g_info),
+ INTEL_I845G_IDS(&intel_i845g_info),
INTEL_I85X_IDS(&intel_i85x_info),
INTEL_I865G_IDS(&intel_i865g_info),
INTEL_I915G_IDS(&intel_i915g_info),
@@ -421,12 +456,14 @@ static const struct pci_device_id pciidlist[] = {
INTEL_VLV_IDS(&intel_valleyview_info),
INTEL_BDW_GT12_IDS(&intel_broadwell_info),
INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
+ INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
INTEL_BXT_IDS(&intel_broxton_info),
+ INTEL_GLK_IDS(&intel_geminilake_info),
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
new file mode 100644
index 000000000000..a1b7eec58be2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -0,0 +1,2096 @@
+/*
+ * Copyright © 2015-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Robert Bragg <robert@sixbynine.org>
+ */
+
+
+/**
+ * DOC: i915 Perf Overview
+ *
+ * Gen graphics supports a large number of performance counters that can help
+ * driver and application developers understand and optimize their use of the
+ * GPU.
+ *
+ * This i915 perf interface enables userspace to configure and open a file
+ * descriptor representing a stream of GPU metrics which can then be read() as
+ * a stream of sample records.
+ *
+ * The interface is particularly suited to exposing buffered metrics that are
+ * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
+ *
+ * Streams representing a single context are accessible to applications with a
+ * corresponding drm file descriptor, such that OpenGL can use the interface
+ * without special privileges. Access to system-wide metrics requires root
+ * privileges by default, unless changed via the dev.i915.perf_event_paranoid
+ * sysctl option.
+ *
+ */
+
+/**
+ * DOC: i915 Perf History and Comparison with Core Perf
+ *
+ * The interface was initially inspired by the core Perf infrastructure but
+ * some notable differences are:
+ *
+ * i915 perf file descriptors represent a "stream" instead of an "event"; where
+ * a perf event primarily corresponds to a single 64bit value, while a stream
+ * might sample sets of tightly-coupled counters, depending on the
+ * configuration. For example the Gen OA unit isn't designed to support
+ * orthogonal configurations of individual counters; it's configured for a set
+ * of related counters. Samples for an i915 perf stream capturing OA metrics
+ * will include a set of counter values packed in a compact HW specific format.
+ * The OA unit supports a number of different packing formats which can be
+ * selected by the user opening the stream. Perf has support for grouping
+ * events, but each event in the group is configured, validated and
+ * authenticated individually with separate system calls.
+ *
+ * i915 perf stream configurations are provided as an array of u64 (key, value)
+ * pairs, instead of a fixed struct with multiple miscellaneous config members,
+ * interleaved with event-type specific members.
+ *
+ * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
+ * The supported metrics are being written to memory by the GPU unsynchronized
+ * with the CPU, using HW specific packing formats for counter sets. Sometimes
+ * the constraints on HW configuration require reports to be filtered before it
+ * would be acceptable to expose them to unprivileged applications - to hide
+ * the metrics of other processes/contexts. For these use cases a read() based
+ * interface is a good fit, and provides an opportunity to filter data as it
+ * gets copied from the GPU mapped buffers to userspace buffers.
+ *
+ *
+ * Issues hit with first prototype based on Core Perf
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The first prototype of this driver was based on the core perf
+ * infrastructure, and while we did make that mostly work, with some changes to
+ * perf, we found we were breaking or working around too many assumptions baked
+ * into perf's currently cpu-centric design.
+ *
+ * In the end we didn't see a clear benefit to making perf's implementation and
+ * interface more complex by changing design assumptions while we knew we still
+ * wouldn't be able to use any existing perf based userspace tools.
+ *
+ * Also considering the Gen specific nature of the Observability hardware and
+ * how userspace will sometimes need to combine i915 perf OA metrics with
+ * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
+ * expecting the interface to be used by a platform specific userspace such as
+ * OpenGL or tools. This is to say, we aren't inherently missing out on having
+ * a standard vendor/architecture agnostic interface by not using perf.
+ *
+ *
+ * For posterity, in case we might re-visit trying to adapt core perf to be
+ * better suited to exposing i915 metrics these were the main pain points we
+ * hit:
+ *
+ * - The perf based OA PMU driver broke some significant design assumptions:
+ *
+ * Existing perf pmus are used for profiling work on a cpu and we were
+ * introducing the idea of _IS_DEVICE pmus with different security
+ * implications, the need to fake cpu-related data (such as user/kernel
+ * registers) to fit with perf's current design, and adding _DEVICE records
+ * as a way to forward device-specific status records.
+ *
+ * The OA unit writes reports of counters into a circular buffer, without
+ * involvement from the CPU, making our PMU driver the first of a kind.
+ *
+ * Given the way we were periodically forwarding data from the GPU-mapped OA
+ * buffer to perf's buffer, those bursts of sample writes looked to perf like
+ * we were sampling too fast and so we had to subvert its throttling checks.
+ *
+ * Perf supports groups of counters and allows those to be read via
+ * transactions internally but transactions currently seem designed to be
+ * explicitly initiated from the cpu (say in response to a userspace read())
+ * and while we could pull a report out of the OA buffer we can't
+ * trigger a report from the cpu on demand.
+ *
+ * Related to being report based; the OA counters are configured in HW as a
+ * set while perf generally expects counter configurations to be orthogonal.
+ * Although counters can be associated with a group leader as they are
+ * opened, there's no clear precedent for being able to provide group-wide
+ * configuration attributes (for example we want to let userspace choose the
+ * OA unit report format used to capture all counters in a set, or specify a
+ * GPU context to filter metrics on). We avoided using perf's grouping
+ * feature and forwarded OA reports to userspace via perf's 'raw' sample
+ * field. This suited our userspace well considering how coupled the counters
+ * are when dealing with normalizing. It would be inconvenient to split
+ * counters up into separate events, only to require userspace to recombine
+ * them. For Mesa it's also convenient to be forwarded raw, periodic reports
+ * for combining with the side-band raw reports it captures using
+ * MI_REPORT_PERF_COUNT commands.
+ *
+ * - As a side note on perf's grouping feature, there was also some concern
+ * that using PERF_FORMAT_GROUP as a way to pack together counter values
+ * would quite drastically inflate our sample sizes, which would likely
+ * lower the effective sampling resolutions we could use when the available
+ * memory bandwidth is limited.
+ *
+ * With the OA unit's report formats, counters are packed together as 32
+ * or 40bit values, with the largest report size being 256 bytes.
+ *
+ * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
+ * documented ordering to the values, implying PERF_FORMAT_ID must also be
+ * used to add a 64bit ID before each value; giving 16 bytes per counter.
+ *
+ * Related to counter orthogonality, we can't time-share the OA unit, while
+ * event scheduling is a central design idea within perf for allowing
+ * userspace to open + enable more events than can be configured in HW at any
+ * one time. The OA unit is not designed to allow re-configuration while in
+ * use. We can't reconfigure the OA unit without losing internal OA unit
+ * state which we can't access explicitly to save and restore. Reconfiguring
+ * the OA unit is also relatively slow, involving ~100 register writes. From
+ * userspace Mesa also depends on a stable OA configuration when emitting
+ * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
+ * disabled while there are outstanding MI_RPC commands lest we hang the
+ * command streamer.
+ *
+ * The contents of sample records aren't extensible by device drivers (i.e.
+ * the sample_type bits). As an example; Sourab Gupta had been looking to
+ * attach GPU timestamps to our OA samples. We were shoehorning OA reports
+ * into sample records by using the 'raw' field, but it's tricky to pack more
+ * than one thing into this field because events/core.c currently only lets a
+ * pmu give a single raw data pointer plus len which will be copied into the
+ * ring buffer. To include more than the OA report we'd have to copy the
+ * report into an intermediate larger buffer. I'd been considering allowing a
+ * vector of data+len values to be specified for copying the raw data, but
+ * it felt like a kludge to be using the raw field for this purpose.
+ *
+ * - It felt like our perf based PMU was making some technical compromises
+ * just for the sake of using perf:
+ *
+ * perf_event_open() requires events to either relate to a pid or a specific
+ * cpu core, while our device pmu relates to neither. Events opened with a
+ * pid will be automatically enabled/disabled according to the scheduling of
+ * that process - so not appropriate for us. When an event is related to a
+ * cpu id, perf ensures pmu methods will be invoked via an inter process
+ * interrupt on that core. To avoid invasive changes our userspace opened OA
+ * perf events for a specific cpu. This was workable but it meant the
+ * majority of the OA driver ran in atomic context, including all OA report
+ * forwarding, which wasn't really necessary in our case and seemed to make
+ * our locking requirements somewhat complex as we handled the interaction
+ * with the rest of the i915 driver.
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/sizes.h>
+
+#include "i915_drv.h"
+#include "i915_oa_hsw.h"
+
+/* HW requires this to be a power of two, between 128k and 16M, though the
+ * driver is currently generally designed assuming the largest 16M size is
+ * used such that the overflow cases are unlikely in normal operation.
+ */
+#define OA_BUFFER_SIZE SZ_16M
+
+#define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1))
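+
+/* Worked example (illustrative only, not part of the original patch):
+ * because OA_BUFFER_SIZE is a power of two, the masked subtraction in
+ * OA_TAKEN() also gives the right byte count once the tail has wrapped
+ * back past the end of the buffer while the head hasn't yet, e.g.:
+ *
+ *   OA_TAKEN(0x40, OA_BUFFER_SIZE - 0x40) == 0x80
+ */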
+
+/* There's a HW race condition between OA unit tail pointer register updates and
+ * writes to memory whereby the tail pointer can sometimes get ahead of what's
+ * been written out to the OA buffer so far.
+ *
+ * Although this can be observed explicitly by checking for a zeroed report-id
+ * field in tail reports, it seems preferable to account for this earlier e.g.
+ * as part of the _oa_buffer_is_empty checks to minimize -EAGAIN polling cycles
+ * in this situation.
+ *
+ * To give time for the most recent reports to land before they may be copied to
+ * userspace, the driver operates as if the tail pointer effectively lags behind
+ * the HW tail pointer by 'tail_margin' bytes. The margin in bytes is calculated
+ * based on this constant in nanoseconds, the current OA sampling exponent
+ * and current report size.
+ *
+ * There is also a fallback check while reading to simply skip over reports with
+ * a zeroed report-id.
+ */
+#define OA_TAIL_MARGIN_NSEC 100000ULL
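+
+/* An illustrative calculation (mirroring the margin math applied in
+ * i915_oa_stream_init() below): with the minimum 160ns sampling period
+ * and 256 byte A45_B8_C8 reports the driver lags the HW tail by
+ * (100000 / 160 + 1) * 256 = 160256 bytes.
+ */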
+
+/* frequency for checking whether the OA unit has written new reports to the
+ * circular OA buffer...
+ */
+#define POLL_FREQUENCY 200
+#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
+
+/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
+static int zero;
+static int one = 1;
+static u32 i915_perf_stream_paranoid = true;
+
+/* The maximum exponent the hardware accepts is 63 (essentially it selects one
+ * of the 64bit timestamp bits to trigger reports from) but there's currently
+ * no known use case for sampling as infrequently as once per 47 thousand years.
+ *
+ * Since the timestamps included in OA reports are only 32bits it seems
+ * reasonable to limit the OA exponent where it's still possible to account for
+ * overflow in OA report timestamps.
+ */
+#define OA_EXPONENT_MAX 31
+
+#define INVALID_CTX_ID 0xffffffff
+
+
+/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
+ *
+ * 160ns is the smallest sampling period we can theoretically program the OA
+ * unit with on Haswell, corresponding to 6.25MHz.
+ */
+static int oa_sample_rate_hard_limit = 6250000;
+
+/* Theoretically we can program the OA unit to sample every 160ns but don't
+ * allow that by default unless root...
+ *
+ * The default threshold of 100000Hz is based on perf's similar
+ * kernel.perf_event_max_sample_rate sysctl parameter.
+ */
+static u32 i915_oa_max_sample_rate = 100000;
+
+/* XXX: beware if future OA HW adds new report formats that the current
+ * code assumes all reports have a power-of-two size and ~(size - 1) can
+ * be used as a mask to align the OA tail pointer.
+ */
+static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+ [I915_OA_FORMAT_A13] = { 0, 64 },
+ [I915_OA_FORMAT_A29] = { 1, 128 },
+ [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
+ /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
+ [I915_OA_FORMAT_B4_C8] = { 4, 64 },
+ [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
+ [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
+ [I915_OA_FORMAT_C4_B8] = { 7, 64 },
+};
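+
+/* (Illustrative check of the comment above: 16M / 256 = 65536 reports fit
+ * exactly, whereas 192 byte A29_B8_C8 reports would leave a 64 byte
+ * remainder at the wrap point.)
+ */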
+
+#define SAMPLE_OA_REPORT (1<<0)
+
+/**
+ * struct perf_open_properties - for validated properties given to open a stream
+ * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
+ * @single_context: Whether a single or all gpu contexts should be monitored
+ * @ctx_handle: A gem ctx handle for use with @single_context
+ * @metrics_set: An ID for an OA unit metric set advertised via sysfs
+ * @oa_format: An OA unit HW report format
+ * @oa_periodic: Whether to enable periodic OA unit sampling
+ * @oa_period_exponent: The OA unit sampling period is derived from this
+ *
+ * As read_properties_unlocked() enumerates and validates the properties given
+ * to open a stream of metrics the configuration is built up in the structure
+ * which starts out zero initialized.
+ */
+struct perf_open_properties {
+ u32 sample_flags;
+
+ u64 single_context:1;
+ u64 ctx_handle;
+
+ /* OA sampling state */
+ int metrics_set;
+ int oa_format;
+ bool oa_periodic;
+ int oa_period_exponent;
+};
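+
+/* For reference, a hedged userspace sketch of the u64 (key, value) pair
+ * encoding these fields are parsed from; the uapi names are from this
+ * series, while drm_fd and the sysfs-derived metrics_set_id are
+ * illustrative placeholders:
+ *
+ *   uint64_t props[] = {
+ *           DRM_I915_PERF_PROP_SAMPLE_OA, 1,
+ *           DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
+ *           DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
+ *           DRM_I915_PERF_PROP_OA_EXPONENT, 16,
+ *   };
+ *   struct drm_i915_perf_open_param param = {
+ *           .flags = I915_PERF_FLAG_FD_CLOEXEC,
+ *           .num_properties = sizeof(props) / 16,
+ *           .properties_ptr = (uintptr_t)props,
+ *   };
+ *   int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
+ */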
+
+/* NB: This is either called via fops or the poll check hrtimer (atomic ctx)
+ *
+ * It's safe to read OA config state here unlocked, assuming that this is only
+ * called while the stream is enabled, while the global OA configuration can't
+ * be modified.
+ *
+ * Note: we don't lock around the head/tail reads even though there's the slim
+ * possibility of read() fop errors forcing a re-init of the OA buffer
+ * pointers. A race here could result in a false positive !empty status which
+ * is acceptable.
+ */
+static bool gen7_oa_buffer_is_empty_fop_unlocked(struct drm_i915_private *dev_priv)
+{
+ int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ u32 oastatus2 = I915_READ(GEN7_OASTATUS2);
+ u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
+ u32 head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
+ u32 tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+
+ return OA_TAKEN(tail, head) <
+ dev_priv->perf.oa.tail_margin + report_size;
+}
+
+/**
+ * append_oa_status - Appends a status record to a userspace read() buffer.
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ * @type: The kind of status to report to userspace
+ *
+ * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
+ * into the userspace read() buffer.
+ *
+ * The @buf @offset will only be updated on success.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static int append_oa_status(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset,
+ enum drm_i915_perf_record_type type)
+{
+ struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };
+
+ if ((count - *offset) < header.size)
+ return -ENOSPC;
+
+ if (copy_to_user(buf + *offset, &header, sizeof(header)))
+ return -EFAULT;
+
+ (*offset) += header.size;
+
+ return 0;
+}
+
+/**
+ * append_oa_sample - Copies single OA report into userspace read() buffer.
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ * @report: A single OA report to (optionally) include as part of the sample
+ *
+ * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
+ * properties when opening a stream, tracked as `stream->sample_flags`. This
+ * function copies the requested components of a single sample to the given
+ * read() @buf.
+ *
+ * The @buf @offset will only be updated on success.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static int append_oa_sample(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset,
+ const u8 *report)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ struct drm_i915_perf_record_header header;
+ u32 sample_flags = stream->sample_flags;
+
+ header.type = DRM_I915_PERF_RECORD_SAMPLE;
+ header.pad = 0;
+ header.size = stream->sample_size;
+
+ if ((count - *offset) < header.size)
+ return -ENOSPC;
+
+ buf += *offset;
+ if (copy_to_user(buf, &header, sizeof(header)))
+ return -EFAULT;
+ buf += sizeof(header);
+
+ if (sample_flags & SAMPLE_OA_REPORT) {
+ if (copy_to_user(buf, report, report_size))
+ return -EFAULT;
+ }
+
+ (*offset) += header.size;
+
+ return 0;
+}
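+
+/* A matching userspace sketch (illustrative; handle_oa_report() is a
+ * hypothetical callback) showing how the records written above can be
+ * walked after a read():
+ *
+ *   const uint8_t *p = buf, *end = buf + n_read;
+ *
+ *   while (p < end) {
+ *           const struct drm_i915_perf_record_header *h = (const void *)p;
+ *
+ *           if (h->type == DRM_I915_PERF_RECORD_SAMPLE)
+ *                   handle_oa_report(p + sizeof(*h));
+ *           p += h->size; // size includes the header itself
+ *   }
+ */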
+
+/**
+ * gen7_append_oa_reports - Copies all buffered OA reports into
+ * userspace read() buffer.
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ * @head_ptr: (inout): the current oa buffer cpu read position
+ * @tail: the current oa buffer gpu write position
+ *
+ * Notably any error condition resulting in a short read (-%ENOSPC or
+ * -%EFAULT) will be returned even though one or more records may
+ * have been successfully copied. In this case it's up to the caller
+ * to decide if the error should be squashed before returning to
+ * userspace.
+ *
+ * Note: reports are consumed from the head, and appended to the
+ * tail, so the head chases the tail?... If you think that's mad
+ * and back-to-front you're not alone, but this follows the
+ * Gen PRM naming convention.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static int gen7_append_oa_reports(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset,
+ u32 *head_ptr,
+ u32 tail)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
+ int tail_margin = dev_priv->perf.oa.tail_margin;
+ u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ u32 mask = (OA_BUFFER_SIZE - 1);
+ u32 head;
+ u32 taken;
+ int ret = 0;
+
+ if (WARN_ON(!stream->enabled))
+ return -EIO;
+
+ head = *head_ptr - gtt_offset;
+ tail -= gtt_offset;
+
+ /* The OA unit is expected to wrap the tail pointer according to the OA
+ * buffer size and since we should never write a misaligned head
+ * pointer we don't expect to read one back either...
+ */
+ if (tail > OA_BUFFER_SIZE || head > OA_BUFFER_SIZE ||
+ head % report_size) {
+ DRM_ERROR("Inconsistent OA buffer pointer (head = %u, tail = %u): force restart\n",
+ head, tail);
+ dev_priv->perf.oa.ops.oa_disable(dev_priv);
+ dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ *head_ptr = I915_READ(GEN7_OASTATUS2) &
+ GEN7_OASTATUS2_HEAD_MASK;
+ return -EIO;
+ }
+
+
+ /* The tail pointer increases in 64 byte increments, not in report_size
+ * steps...
+ */
+ tail &= ~(report_size - 1);
+
+ /* Move the tail pointer back by the current tail_margin to account for
+ * the possibility that the latest reports may not have really landed
+ * in memory yet...
+ */
+
+ if (OA_TAKEN(tail, head) < report_size + tail_margin)
+ return -EAGAIN;
+
+ tail -= tail_margin;
+ tail &= mask;
+
+ for (/* none */;
+ (taken = OA_TAKEN(tail, head));
+ head = (head + report_size) & mask) {
+ u8 *report = oa_buf_base + head;
+ u32 *report32 = (void *)report;
+
+ /* All the report sizes factor neatly into the buffer
+ * size so we never expect to see a report split
+ * between the beginning and end of the buffer.
+ *
+ * Given the initial alignment check a misalignment
+ * here would imply a driver bug that would result
+ * in an overrun.
+ */
+ if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
+ DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ break;
+ }
+
+ /* The report-ID field for periodic samples includes
+ * some undocumented flags related to what triggered
+ * the report and is never expected to be zero so we
+ * can check that the report isn't invalid before
+ * copying it to userspace...
+ */
+ if (report32[0] == 0) {
+ DRM_NOTE("Skipping spurious, invalid OA report\n");
+ continue;
+ }
+
+ ret = append_oa_sample(stream, buf, count, offset, report);
+ if (ret)
+ break;
+
+ /* The above report-id field sanity check is based on
+ * the assumption that the OA buffer is initially
+ * zeroed and we reset the field after copying so the
+ * check is still meaningful once old reports start
+ * being overwritten.
+ */
+ report32[0] = 0;
+ }
+
+ *head_ptr = gtt_offset + head;
+
+ return ret;
+}
+
+/**
+ * gen7_oa_read - copy status records then buffered OA reports
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ *
+ * Checks Gen 7 specific OA unit status registers and if necessary appends
+ * corresponding status records for userspace (such as for a buffer full
+ * condition) and then initiates appending any buffered OA reports.
+ *
+ * Updates @offset according to the number of bytes successfully copied into
+ * the userspace buffer.
+ *
+ * Returns: zero on success or a negative error code
+ */
+static int gen7_oa_read(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ u32 oastatus2;
+ u32 oastatus1;
+ u32 head;
+ u32 tail;
+ int ret;
+
+ if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ return -EIO;
+
+ oastatus2 = I915_READ(GEN7_OASTATUS2);
+ oastatus1 = I915_READ(GEN7_OASTATUS1);
+
+ head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
+ tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+
+ /* XXX: On Haswell we don't have a safe way to clear oastatus1
+ * bits while the OA unit is enabled (while the tail pointer
+ * may be updated asynchronously) so we ignore status bits
+ * that have already been reported to userspace.
+ */
+ oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;
+
+ /* We treat OABUFFER_OVERFLOW as a significant error:
+ *
+ * - The status can be interpreted to mean that the buffer is
+ * currently full (with a higher precedence than OA_TAKEN()
+ * which will start to report a near-empty buffer after an
+ * overflow) but it's awkward that we can't clear the status
+ * on Haswell, so without a reset we won't be able to catch
+ * the state again.
+ *
+ * - Since it also implies the HW has started overwriting old
+ * reports it may also affect our sanity checks for invalid
+ * reports when copying to userspace that assume new reports
+ * are being written to cleared memory.
+ *
+ * - In the future we may want to introduce a flight recorder
+ * mode where the driver will automatically maintain a safe
+ * guard band between head/tail, avoiding this overflow
+ * condition, but we avoid the added driver complexity for
+ * now.
+ */
+ if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
+ ret = append_oa_status(stream, buf, count, offset,
+ DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG("OA buffer overflow: force restart\n");
+
+ dev_priv->perf.oa.ops.oa_disable(dev_priv);
+ dev_priv->perf.oa.ops.oa_enable(dev_priv);
+
+ oastatus2 = I915_READ(GEN7_OASTATUS2);
+ oastatus1 = I915_READ(GEN7_OASTATUS1);
+
+ head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
+ tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+ }
+
+ if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
+ ret = append_oa_status(stream, buf, count, offset,
+ DRM_I915_PERF_RECORD_OA_REPORT_LOST);
+ if (ret)
+ return ret;
+ dev_priv->perf.oa.gen7_latched_oastatus1 |=
+ GEN7_OASTATUS1_REPORT_LOST;
+ }
+
+ ret = gen7_append_oa_reports(stream, buf, count, offset,
+ &head, tail);
+
+ /* All the report sizes are a power of two and the
+ * head should always be incremented by some multiple
+ * of the report size.
+ *
+ * A warning here, but notably if we later read back a
+ * misaligned pointer we will treat that as a bug since
+ * it could lead to a buffer overrun.
+ */
+ WARN_ONCE(head & (report_size - 1),
+ "i915: Writing misaligned OA head pointer");
+
+ /* Note: we update the head pointer here even if an error
+ * was returned since the error may represent a short read
+ * where some reports were successfully copied.
+ */
+ I915_WRITE(GEN7_OASTATUS2,
+ ((head & GEN7_OASTATUS2_HEAD_MASK) |
+ OA_MEM_SELECT_GGTT));
+
+ return ret;
+}
+
+/**
+ * i915_oa_wait_unlocked - handles blocking IO until OA data available
+ * @stream: An i915-perf stream opened for OA metrics
+ *
+ * Called when userspace tries to read() from a blocking stream FD opened
+ * for OA metrics. It waits until the hrtimer callback finds a non-empty
+ * OA buffer and wakes us.
+ *
+ * Note: it's acceptable to have this return with some false positives
+ * since any subsequent read handling will return -EAGAIN if there isn't
+ * really data ready for userspace yet.
+ *
+ * Returns: zero on success or a negative error code
+ */
+static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ /* We would wait indefinitely if periodic sampling is not enabled */
+ if (!dev_priv->perf.oa.periodic)
+ return -EIO;
+
+ /* Note: the oa_buffer_is_empty() condition is ok to run unlocked as it
+ * just performs mmio reads of the OA buffer head + tail pointers and
+ * it's assumed we're handling some operation that implies the stream
+ * can't be destroyed until completion (such as a read()) that ensures
+ * the device + OA buffer can't disappear
+ */
+ return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
+ !dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv));
+}
+
+/**
+ * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
+ * @stream: An i915-perf stream opened for OA metrics
+ * @file: An i915 perf stream file
+ * @wait: poll() state table
+ *
+ * For handling userspace polling on an i915 perf stream opened for OA metrics,
+ * this starts a poll_wait with the wait queue that our hrtimer callback wakes
+ * when it sees data ready to read in the circular OA buffer.
+ */
+static void i915_oa_poll_wait(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
+}
+
+/**
+ * i915_oa_read - just calls through to &i915_oa_ops->read
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ *
+ * Updates @offset according to the number of bytes successfully copied into
+ * the userspace buffer.
+ *
+ * Returns: zero on success or a negative error code
+ */
+static int i915_oa_read(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
+}
+
+/**
+ * oa_get_render_ctx_id - determine and hold ctx hw id
+ * @stream: An i915-perf stream opened for OA metrics
+ *
+ * Determine the render context hw id, and ensure it remains fixed for the
+ * lifetime of the stream. This ensures that we don't have to worry about
+ * updating the context ID in OACONTROL on the fly.
+ *
+ * Returns: zero on success or a negative error code
+ */
+static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ return ret;
+
+ /* As the ID is the gtt offset of the context's vma we pin
+ * the vma to ensure the ID remains fixed.
+ *
+ * NB: implied RCS engine...
+ */
+ ret = engine->context_pin(engine, stream->ctx);
+ if (ret)
+ goto unlock;
+
+ /* Explicitly track the ID (instead of calling i915_ggtt_offset()
+ * on the fly) considering the difference with gen8+ and
+ * execlists
+ */
+ dev_priv->perf.oa.specific_ctx_id =
+ i915_ggtt_offset(stream->ctx->engine[engine->id].state);
+
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return ret;
+}
+
+/**
+ * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
+ * @stream: An i915-perf stream opened for OA metrics
+ *
+ * In case anything needed doing to ensure the context HW ID would remain valid
+ * for the lifetime of the stream, then that can be undone here.
+ */
+static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+ engine->context_unpin(engine, stream->ctx);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+static void
+free_oa_buffer(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->drm.struct_mutex);
+
+ i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
+ i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
+ i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);
+
+ i915->perf.oa.oa_buffer.vma = NULL;
+ i915->perf.oa.oa_buffer.vaddr = NULL;
+
+ mutex_unlock(&i915->drm.struct_mutex);
+}
+
+static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
+
+ dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+
+ free_oa_buffer(dev_priv);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
+
+ if (stream->ctx)
+ oa_put_render_ctx_id(stream);
+
+ dev_priv->perf.oa.exclusive_stream = NULL;
+}
+
+static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
+{
+ u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+
+ /* Pre-DevBDW: OABUFFER must be set with counters off,
+ * before OASTATUS1, but after OASTATUS2
+ */
+ I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
+ I915_WRITE(GEN7_OABUFFER, gtt_offset);
+ I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
+
+ /* On Haswell we have to track which OASTATUS1 flags we've
+ * already seen since they can't be cleared while periodic
+ * sampling is enabled.
+ */
+ dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
+
+ /* NB: although the OA buffer will initially be allocated
+ * zeroed via shmfs (and so this memset is redundant when
+ * first allocating), we may re-init the OA buffer, either
+ * when re-enabling a stream or in error/reset paths.
+ *
+ * The reason we clear the buffer for each re-init is for the
+ * sanity check in gen7_append_oa_reports() that looks at the
+ * report-id field to make sure it's non-zero which relies on
+ * the assumption that new reports are being written to zeroed
+ * memory...
+ */
+ memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+
+ /* Maybe make ->pollin per-stream state if we support multiple
+ * concurrent streams in the future.
+ */
+ dev_priv->perf.oa.pollin = false;
+}
+
+static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *bo;
+ struct i915_vma *vma;
+ int ret;
+
+ if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ return ret;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
+ BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
+
+ bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
+ if (IS_ERR(bo)) {
+ DRM_ERROR("Failed to allocate OA buffer\n");
+ ret = PTR_ERR(bo);
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
+
+ /* PreHSW required 512K alignment, HSW requires 16M */
+ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+ dev_priv->perf.oa.oa_buffer.vma = vma;
+
+ dev_priv->perf.oa.oa_buffer.vaddr =
+ i915_gem_object_pin_map(bo, I915_MAP_WB);
+ if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
+ ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
+ goto err_unpin;
+ }
+
+ dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
+
+ DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
+ i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
+ dev_priv->perf.oa.oa_buffer.vaddr);
+
+ goto unlock;
+
+err_unpin:
+ __i915_vma_unpin(vma);
+
+err_unref:
+ i915_gem_object_put(bo);
+
+ dev_priv->perf.oa.oa_buffer.vaddr = NULL;
+ dev_priv->perf.oa.oa_buffer.vma = NULL;
+
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static void config_oa_regs(struct drm_i915_private *dev_priv,
+ const struct i915_oa_reg *regs,
+ int n_regs)
+{
+ int i;
+
+ for (i = 0; i < n_regs; i++) {
+ const struct i915_oa_reg *reg = regs + i;
+
+ I915_WRITE(reg->addr, reg->value);
+ }
+}
+
+static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
+{
+ int ret = i915_oa_select_metric_set_hsw(dev_priv);
+
+ if (ret)
+ return ret;
+
+ I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) |
+ GT_NOA_ENABLE));
+
+ /* PRM:
+ *
+ * OA unit is using “crclk” for its functionality. When trunk
+ * level clock gating takes place, OA clock would be gated,
+ * unable to count the events from non-render clock domain.
+ * Render clock gating must be disabled when OA is enabled to
+ * count the events from non-render domain. Unit level clock
+ * gating for RCS should also be disabled.
+ */
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+ ~GEN7_DOP_CLOCK_GATE_ENABLE));
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+
+ config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs,
+ dev_priv->perf.oa.mux_regs_len);
+
+ /* It apparently takes a fairly long time for a new MUX
+ * configuration to be applied after these register writes.
+ * This delay duration was derived empirically based on the
+ * render_basic config but hopefully it covers the maximum
+ * configuration latency.
+ *
+ * As a fallback, the checks in _append_oa_reports() to skip
+ * invalid OA reports do also seem to work to discard reports
+ * generated before this config has completed - albeit not
+ * silently.
+ *
+ * Unfortunately this is essentially a magic number, since we
+ * don't currently know of a reliable mechanism for predicting
+ * how long the MUX config will take to apply and besides
+ * seeing invalid reports we don't know of a reliable way to
+ * explicitly check that the MUX config has landed.
+ *
+ * It's even possible we've mischaracterized the underlying
+ * problem - it just seems like the simplest explanation for why
+ * a delay at this location would mitigate any invalid reports.
+ */
+ usleep_range(15000, 20000);
+
+ config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs,
+ dev_priv->perf.oa.b_counter_regs_len);
+
+ return 0;
+}
+
+static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
+ ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
+ GEN7_DOP_CLOCK_GATE_ENABLE));
+
+ I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
+ ~GT_NOA_ENABLE));
+}
+
+static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->perf.hook_lock);
+
+ if (dev_priv->perf.oa.exclusive_stream->enabled) {
+ struct i915_gem_context *ctx =
+ dev_priv->perf.oa.exclusive_stream->ctx;
+ u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
+
+ bool periodic = dev_priv->perf.oa.periodic;
+ u32 period_exponent = dev_priv->perf.oa.period_exponent;
+ u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+
+ I915_WRITE(GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
+ } else
+ I915_WRITE(GEN7_OACONTROL, 0);
+}
+
+static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+{
+ unsigned long flags;
+
+ /* Reset buf pointers so we don't forward reports from before now.
+ *
+ * Think carefully if considering trying to avoid this, since it
+ * also ensures status flags and the buffer itself are cleared
+ * in error paths, and we have checks for invalid reports based
+ * on the assumption that certain fields are written to zeroed
+ * memory, which this helps maintain.
+ */
+ gen7_init_oa_buffer(dev_priv);
+
+ spin_lock_irqsave(&dev_priv->perf.hook_lock, flags);
+ gen7_update_oacontrol_locked(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->perf.hook_lock, flags);
+}
+
+/**
+ * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
+ * @stream: An i915 perf stream opened for OA metrics
+ *
+ * [Re]enables hardware periodic sampling according to the period configured
+ * when opening the stream. This also starts a hrtimer that will periodically
+ * check for data in the circular OA buffer for notifying userspace (e.g.
+ * during a read() or poll()).
+ */
+static void i915_oa_stream_enable(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ dev_priv->perf.oa.ops.oa_enable(dev_priv);
+
+ if (dev_priv->perf.oa.periodic)
+ hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
+ ns_to_ktime(POLL_PERIOD),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(GEN7_OACONTROL, 0);
+}
+
+/**
+ * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
+ * @stream: An i915 perf stream opened for OA metrics
+ *
+ * Stops the OA unit from periodically writing counter reports into the
+ * circular OA buffer. This also stops the hrtimer that periodically checks for
+ * data in the circular OA buffer, for notifying userspace.
+ */
+static void i915_oa_stream_disable(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ dev_priv->perf.oa.ops.oa_disable(dev_priv);
+
+ if (dev_priv->perf.oa.periodic)
+ hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
+}
+
+static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
+{
+ return div_u64(1000000000ULL * (2ULL << exponent),
+ dev_priv->perf.oa.timestamp_frequency);
+}
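+
+/* E.g. with Haswell's 12.5MHz timestamp frequency (an illustrative check,
+ * not new behaviour): exponent 0 gives the minimum (2 << 0) / 12500000Hz
+ * = 160ns period, while exponent 16 samples roughly every 10.5ms.
+ */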
+
+static const struct i915_perf_stream_ops i915_oa_stream_ops = {
+ .destroy = i915_oa_stream_destroy,
+ .enable = i915_oa_stream_enable,
+ .disable = i915_oa_stream_disable,
+ .wait_unlocked = i915_oa_wait_unlocked,
+ .poll_wait = i915_oa_poll_wait,
+ .read = i915_oa_read,
+};
+
+/**
+ * i915_oa_stream_init - validate combined props for OA stream and init
+ * @stream: An i915 perf stream
+ * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
+ * @props: The property state that configures stream (individually validated)
+ *
+ * While read_properties_unlocked() validates properties in isolation it
+ * doesn't ensure that the combination necessarily makes sense.
+ *
+ * At this point it has been determined that userspace wants a stream of
+ * OA metrics, but we still need to further validate that the combined
+ * properties make sense.
+ *
+ * If the configuration makes sense then we can allocate memory for
+ * a circular OA buffer and apply the requested metric set configuration.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+static int i915_oa_stream_init(struct i915_perf_stream *stream,
+ struct drm_i915_perf_open_param *param,
+ struct perf_open_properties *props)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int format_size;
+ int ret;
+
+ /* If the sysfs metrics/ directory wasn't registered for some
+ * reason then don't let userspace try their luck with config
+ * IDs
+ */
+ if (!dev_priv->perf.metrics_kobj) {
+ DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
+ return -EINVAL;
+ }
+
+ if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
+ DRM_DEBUG("Only OA report sampling supported\n");
+ return -EINVAL;
+ }
+
+ if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+ DRM_DEBUG("OA unit not supported\n");
+ return -ENODEV;
+ }
+
+ /* To avoid the complexity of having to accurately filter
+ * counter reports and marshal to the appropriate client
+ * we currently only allow exclusive access
+ */
+ if (dev_priv->perf.oa.exclusive_stream) {
+ DRM_DEBUG("OA unit already in use\n");
+ return -EBUSY;
+ }
+
+ if (!props->metrics_set) {
+ DRM_DEBUG("OA metric set not specified\n");
+ return -EINVAL;
+ }
+
+ if (!props->oa_format) {
+ DRM_DEBUG("OA report format not specified\n");
+ return -EINVAL;
+ }
+
+ stream->sample_size = sizeof(struct drm_i915_perf_record_header);
+
+ format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
+
+ stream->sample_flags |= SAMPLE_OA_REPORT;
+ stream->sample_size += format_size;
+
+ dev_priv->perf.oa.oa_buffer.format_size = format_size;
+ if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
+ return -EINVAL;
+
+ dev_priv->perf.oa.oa_buffer.format =
+ dev_priv->perf.oa.oa_formats[props->oa_format].format;
+
+ dev_priv->perf.oa.metrics_set = props->metrics_set;
+
+ dev_priv->perf.oa.periodic = props->oa_periodic;
+ if (dev_priv->perf.oa.periodic) {
+ u32 tail;
+
+ dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
+
+ /* See comment for OA_TAIL_MARGIN_NSEC for details
+ * about this tail_margin...
+ */
+ tail = div64_u64(OA_TAIL_MARGIN_NSEC,
+ oa_exponent_to_ns(dev_priv,
+ props->oa_period_exponent));
+ dev_priv->perf.oa.tail_margin = (tail + 1) * format_size;
+ }
+
+ if (stream->ctx) {
+ ret = oa_get_render_ctx_id(stream);
+ if (ret)
+ return ret;
+ }
+
+ ret = alloc_oa_buffer(dev_priv);
+ if (ret)
+ goto err_oa_buf_alloc;
+
+ /* PRM - observability performance counters:
+ *
+ * OACONTROL, performance counter enable, note:
+ *
+ * "When this bit is set, in order to have coherent counts,
+ * RC6 power state and trunk clock gating must be disabled.
+ * This can be achieved by programming MMIO registers as
+ * 0xA094=0 and 0xA090[31]=1"
+ *
+ * In our case we are expecting that taking pm + FORCEWAKE
+ * references will effectively disable RC6.
+ */
+ intel_runtime_pm_get(dev_priv);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
+ if (ret)
+ goto err_enable;
+
+ stream->ops = &i915_oa_stream_ops;
+
+ dev_priv->perf.oa.exclusive_stream = stream;
+
+ return 0;
+
+err_enable:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
+ free_oa_buffer(dev_priv);
+
+err_oa_buf_alloc:
+ if (stream->ctx)
+ oa_put_render_ctx_id(stream);
+
+ return ret;
+}
+
+/**
+ * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
+ * @stream: An i915 perf stream
+ * @file: An i915 perf stream file
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @ppos: (inout) file seek position (unused)
+ *
+ * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
+ * ensure that if we've successfully copied any data then reporting that takes
+ * precedence over any internal error status, so the data isn't lost.
+ *
+ * For example ret will be -ENOSPC whenever there is more buffered data than
+ * can be copied to userspace, but that's only interesting if we weren't able
+ * to copy some data because it implies the userspace buffer is too small to
+ * receive a single record (and we never split records).
+ *
+ * Another case with ret == -EFAULT is more of a grey area since it would seem
+ * like bad form for userspace to ask us to overrun its buffer, but the user
+ * knows best:
+ *
+ * http://yarchive.net/comp/linux/partial_reads_writes.html
+ *
+ * Returns: The number of bytes copied or a negative error code on failure.
+ */
+static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
+ struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ /* Note we keep the offset (aka bytes read) separate from any
+ * error status so that the final check for whether we return
+ * the bytes read with a higher precedence than any error (see
+ * comment below) doesn't need to be handled/duplicated in
+ * stream->ops->read() implementations.
+ */
+ size_t offset = 0;
+ int ret = stream->ops->read(stream, buf, count, &offset);
+
+ return offset ?: (ret ?: -EAGAIN);
+}
+
+/**
+ * i915_perf_read - handles read() FOP for i915 perf stream FDs
+ * @file: An i915 perf stream file
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @ppos: (inout) file seek position (unused)
+ *
+ * The entry point for handling a read() on a stream file descriptor from
+ * userspace. Most of the work is left to the i915_perf_read_locked() and
+ * &i915_perf_stream_ops->read but to save having stream implementations (of
+ * which we might have multiple later) we handle blocking read here.
+ *
+ * We can also consistently treat trying to read from a disabled stream
+ * as an IO error so implementations can assume the stream is enabled
+ * while reading.
+ *
+ * Returns: The number of bytes copied or a negative error code on failure.
+ */
+static ssize_t i915_perf_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct i915_perf_stream *stream = file->private_data;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ ssize_t ret;
+
+ /* To ensure it's handled consistently we simply treat all reads of a
+ * disabled stream as an error. In particular it might otherwise lead
+ * to a deadlock for blocking file descriptors...
+ */
+ if (!stream->enabled)
+ return -EIO;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ /* There's the small chance of false positives from
+ * stream->ops->wait_unlocked.
+ *
+ * E.g. with single-context filtering, since we only wait until the
+ * OA buffer has >= 1 report we don't immediately know whether any
+ * reports really belong to the current context.
+ */
+ do {
+ ret = stream->ops->wait_unlocked(stream);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_read_locked(stream, file,
+ buf, count, ppos);
+ mutex_unlock(&dev_priv->perf.lock);
+ } while (ret == -EAGAIN);
+ } else {
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_read_locked(stream, file, buf, count, ppos);
+ mutex_unlock(&dev_priv->perf.lock);
+ }
+
+ if (ret >= 0) {
+ /* Maybe make ->pollin per-stream state if we support multiple
+ * concurrent streams in the future.
+ */
+ dev_priv->perf.oa.pollin = false;
+ }
+
+ return ret;
+}
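
From userspace this behaves like an ordinary blocking read() that returns whole records. A minimal sketch, assuming a stream fd already opened via the perf open ioctl below and the record header/type names this series adds to i915_drm.h:

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <drm/i915_drm.h>	/* drm_i915_perf_record_header */

    static void read_oa_records(int stream_fd)
    {
            uint8_t buf[128 * 1024]; /* must fit at least one whole record */

            for (;;) {
                    /* Blocks until data is available; fails with EIO if
                     * the stream is disabled, per i915_perf_read() above.
                     */
                    ssize_t len = read(stream_fd, buf, sizeof(buf));

                    if (len < 0 && errno == EINTR)
                            continue;
                    if (len < 0)
                            break;

                    for (ssize_t pos = 0; pos < len; ) {
                            const struct drm_i915_perf_record_header *hdr =
                                    (const void *)(buf + pos);

                            if (!hdr->size) /* defensive: malformed record */
                                    break;
                            if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
                                    ;	/* OA report follows the header */
                            pos += hdr->size; /* size includes the header */
                    }
            }
    }
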
+
+static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(hrtimer, typeof(*dev_priv),
+ perf.oa.poll_check_timer);
+
+ if (!dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)) {
+ dev_priv->perf.oa.pollin = true;
+ wake_up(&dev_priv->perf.oa.poll_wq);
+ }
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
+
+ return HRTIMER_RESTART;
+}
+
+/**
+ * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
+ * @dev_priv: i915 device instance
+ * @stream: An i915 perf stream
+ * @file: An i915 perf stream file
+ * @wait: poll() state table
+ *
+ * For handling userspace polling on an i915 perf stream, this calls through to
+ * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
+ * will be woken for new stream data.
+ *
+ * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * with any non-file-operation driver hooks.
+ *
+ * Returns: any poll events that are ready without sleeping
+ */
+static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
+ struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
+{
+ unsigned int events = 0;
+
+ stream->ops->poll_wait(stream, file, wait);
+
+ /* Note: we don't explicitly check whether there's something to read
+ * here since this path may be very hot depending on what else
+ * userspace is polling, or on the timeout in use. We rely solely on
+ * the hrtimer/oa_poll_check_timer_cb to notify us when there are
+ * samples to read.
+ */
+ if (dev_priv->perf.oa.pollin)
+ events |= POLLIN;
+
+ return events;
+}
+
+/**
+ * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
+ * @file: An i915 perf stream file
+ * @wait: poll() state table
+ *
+ * For handling userspace polling on an i915 perf stream, this ensures
+ * poll_wait() gets called with a wait queue that will be woken for new stream
+ * data.
+ *
+ * Note: Implementation deferred to i915_perf_poll_locked()
+ *
+ * Returns: any poll events that are ready without sleeping
+ */
+static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
+{
+ struct i915_perf_stream *stream = file->private_data;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int ret;
+
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
+ mutex_unlock(&dev_priv->perf.lock);
+
+ return ret;
+}
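
For non-blocking streams the hrtimer above is what eventually flags POLLIN, so a consumer typically sleeps in poll() and then drains the fd until EAGAIN. A minimal sketch under the same uapi assumptions:

    #include <errno.h>
    #include <poll.h>
    #include <unistd.h>

    /* Sleep until the POLL_PERIOD hrtimer flags pollin, then drain the
     * stream; the fd is assumed to have been opened with
     * I915_PERF_FLAG_FD_NONBLOCK.
     */
    static int poll_and_drain(int stream_fd, void *buf, size_t len)
    {
            struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };

            if (poll(&pfd, 1, -1) < 0)
                    return -errno;

            for (;;) {
                    ssize_t n = read(stream_fd, buf, len);

                    if (n < 0 && errno == EAGAIN)
                            return 0; /* drained; pollin gets rearmed */
                    if (n < 0)
                            return -errno;
                    /* ... parse the records in buf[0..n) ... */
            }
    }
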
+
+/**
+ * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
+ * @stream: A disabled i915 perf stream
+ *
+ * [Re]enables the associated capture of data for this stream.
+ *
+ * If a stream was previously enabled then there's currently no intention
+ * to provide userspace any guarantee about the preservation of previously
+ * buffered data.
+ */
+static void i915_perf_enable_locked(struct i915_perf_stream *stream)
+{
+ if (stream->enabled)
+ return;
+
+ /* Allow stream->ops->enable() to refer to this */
+ stream->enabled = true;
+
+ if (stream->ops->enable)
+ stream->ops->enable(stream);
+}
+
+/**
+ * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
+ * @stream: An enabled i915 perf stream
+ *
+ * Disables the associated capture of data for this stream.
+ *
+ * The intention is that disabling and re-enabling a stream will ideally be
+ * cheaper than destroying and re-opening a stream with the same configuration,
+ * though there are no formal guarantees about what state or buffered data
+ * must be retained between disabling and re-enabling a stream.
+ *
+ * Note: while a stream is disabled it's considered an error for userspace
+ * to attempt to read from the stream (-EIO).
+ */
+static void i915_perf_disable_locked(struct i915_perf_stream *stream)
+{
+ if (!stream->enabled)
+ return;
+
+ /* Allow stream->ops->disable() to refer to this */
+ stream->enabled = false;
+
+ if (stream->ops->disable)
+ stream->ops->disable(stream);
+}
+
+/**
+ * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
+ * @stream: An i915 perf stream
+ * @cmd: the ioctl request
+ * @arg: the ioctl data
+ *
+ * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * with any non-file-operation driver hooks.
+ *
+ * Returns: zero on success or a negative error code. Returns -EINVAL for
+ * an unknown ioctl request.
+ */
+static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case I915_PERF_IOCTL_ENABLE:
+ i915_perf_enable_locked(stream);
+ return 0;
+ case I915_PERF_IOCTL_DISABLE:
+ i915_perf_disable_locked(stream);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
+ * @file: An i915 perf stream file
+ * @cmd: the ioctl request
+ * @arg: the ioctl data
+ *
+ * Implementation deferred to i915_perf_ioctl_locked().
+ *
+ * Returns: zero on success or a negative error code. Returns -EINVAL for
+ * an unknown ioctl request.
+ */
+static long i915_perf_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct i915_perf_stream *stream = file->private_data;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ long ret;
+
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_ioctl_locked(stream, cmd, arg);
+ mutex_unlock(&dev_priv->perf.lock);
+
+ return ret;
+}
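
Driving these from userspace is a plain ioctl() on the stream fd, for instance to pause capture around an uninteresting phase (a sketch using the ioctl names from this series' uapi):

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>	/* I915_PERF_IOCTL_{ENABLE,DISABLE} */

    /* Pause and resume capture without tearing down the stream; note
     * that read()s issued while paused fail with EIO, as documented
     * above.
     */
    static void perf_pause(int stream_fd)
    {
            ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
    }

    static void perf_resume(int stream_fd)
    {
            ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
    }
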
+
+/**
+ * i915_perf_destroy_locked - destroy an i915 perf stream
+ * @stream: An i915 perf stream
+ *
+ * Frees all resources associated with the given i915 perf @stream, disabling
+ * any associated data capture in the process.
+ *
+ * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * with any non-file-operation driver hooks.
+ */
+static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
+{
+ if (stream->enabled)
+ i915_perf_disable_locked(stream);
+
+ if (stream->ops->destroy)
+ stream->ops->destroy(stream);
+
+ list_del(&stream->link);
+
+ if (stream->ctx)
+ i915_gem_context_put_unlocked(stream->ctx);
+
+ kfree(stream);
+}
+
+/**
+ * i915_perf_release - handles userspace close() of a stream file
+ * @inode: anonymous inode associated with file
+ * @file: An i915 perf stream file
+ *
+ * Cleans up any resources associated with an open i915 perf stream file.
+ *
+ * NB: close() can't really fail from the userspace point of view.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+static int i915_perf_release(struct inode *inode, struct file *file)
+{
+ struct i915_perf_stream *stream = file->private_data;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ mutex_lock(&dev_priv->perf.lock);
+ i915_perf_destroy_locked(stream);
+ mutex_unlock(&dev_priv->perf.lock);
+
+ return 0;
+}
+
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .release = i915_perf_release,
+ .poll = i915_perf_poll,
+ .read = i915_perf_read,
+ .unlocked_ioctl = i915_perf_ioctl,
+};
+
+
+static struct i915_gem_context *
+lookup_context(struct drm_i915_private *dev_priv,
+ struct drm_i915_file_private *file_priv,
+ u32 ctx_user_handle)
+{
+ struct i915_gem_context *ctx;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ctx = i915_gem_context_lookup(file_priv, ctx_user_handle);
+ if (!IS_ERR(ctx))
+ i915_gem_context_get(ctx);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return ctx;
+}
+
+/**
+ * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
+ * @dev_priv: i915 device instance
+ * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
+ * @props: individually validated u64 property value pairs
+ * @file: drm file
+ *
+ * See i915_perf_open_ioctl() for interface details.
+ *
+ * Implements further stream config validation and stream initialization on
+ * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
+ * taken to serialize with any non-file-operation driver hooks.
+ *
+ * Note: at this point the @props have only been validated in isolation and
+ * it's still necessary to validate that the combination of properties makes
+ * sense.
+ *
+ * In the case where userspace is interested in OA unit metrics then further
+ * config validation and stream initialization details will be handled by
+ * i915_oa_stream_init(). The code here should only validate config state that
+ * will be relevant to all stream types / backends.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+static int
+i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
+ struct drm_i915_perf_open_param *param,
+ struct perf_open_properties *props,
+ struct drm_file *file)
+{
+ struct i915_gem_context *specific_ctx = NULL;
+ struct i915_perf_stream *stream = NULL;
+ unsigned long f_flags = 0;
+ int stream_fd;
+ int ret;
+
+ if (props->single_context) {
+ u32 ctx_handle = props->ctx_handle;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ specific_ctx = lookup_context(dev_priv, file_priv, ctx_handle);
+ if (IS_ERR(specific_ctx)) {
+ ret = PTR_ERR(specific_ctx);
+ if (ret != -EINTR)
+ DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
+ ctx_handle);
+ goto err;
+ }
+ }
+
+ /* Similar to perf's kernel.perf_paranoid_cpu sysctl option,
+ * we check a dev.i915.perf_stream_paranoid sysctl option
+ * to determine if it's OK to access system-wide OA counters
+ * without CAP_SYS_ADMIN privileges.
+ */
+ if (!specific_ctx &&
+ i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
+ ret = -EACCES;
+ goto err_ctx;
+ }
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream) {
+ ret = -ENOMEM;
+ goto err_ctx;
+ }
+
+ stream->dev_priv = dev_priv;
+ stream->ctx = specific_ctx;
+
+ ret = i915_oa_stream_init(stream, param, props);
+ if (ret)
+ goto err_alloc;
+
+ /* We avoid simply assigning stream->sample_flags = props->sample_flags
+ * so that _stream_init can check the combination of sample flags more
+ * thoroughly; still, this is the expected result at this point.
+ */
+ if (WARN_ON(stream->sample_flags != props->sample_flags)) {
+ ret = -ENODEV;
+ goto err_alloc;
+ }
+
+ list_add(&stream->link, &dev_priv->perf.streams);
+
+ if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
+ f_flags |= O_CLOEXEC;
+ if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
+ f_flags |= O_NONBLOCK;
+
+ stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
+ if (stream_fd < 0) {
+ ret = stream_fd;
+ goto err_open;
+ }
+
+ if (!(param->flags & I915_PERF_FLAG_DISABLED))
+ i915_perf_enable_locked(stream);
+
+ return stream_fd;
+
+err_open:
+ list_del(&stream->link);
+ if (stream->ops->destroy)
+ stream->ops->destroy(stream);
+err_alloc:
+ kfree(stream);
+err_ctx:
+ if (specific_ctx)
+ i915_gem_context_put_unlocked(specific_ctx);
+err:
+ return ret;
+}
+
+/**
+ * read_properties_unlocked - validate + copy userspace stream open properties
+ * @dev_priv: i915 device instance
+ * @uprops: The array of u64 key value pairs given by userspace
+ * @n_props: The number of key value pairs expected in @uprops
+ * @props: The stream configuration built up while validating properties
+ *
+ * Note this function only validates properties in isolation; it doesn't
+ * validate that the combination of properties makes sense or that all
+ * properties necessary for a particular kind of stream have been set.
+ *
+ * Note that there currently aren't any ordering requirements for properties so
+ * we shouldn't validate or assume anything about ordering here. This doesn't
+ * rule out defining new properties with ordering requirements in the future.
+ */
+static int read_properties_unlocked(struct drm_i915_private *dev_priv,
+ u64 __user *uprops,
+ u32 n_props,
+ struct perf_open_properties *props)
+{
+ u64 __user *uprop = uprops;
+ int i;
+
+ memset(props, 0, sizeof(struct perf_open_properties));
+
+ if (!n_props) {
+ DRM_DEBUG("No i915 perf properties given\n");
+ return -EINVAL;
+ }
+
+ /* Considering that ID = 0 is reserved and assuming that we don't
+ * (currently) expect any configurations to ever specify duplicate
+ * values for a particular property ID, the last _PROP_MAX value is
+ * one greater than the maximum number of properties we expect to get
+ * from userspace.
+ */
+ if (n_props >= DRM_I915_PERF_PROP_MAX) {
+ DRM_DEBUG("More i915 perf properties specified than exist\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < n_props; i++) {
+ u64 oa_period, oa_freq_hz;
+ u64 id, value;
+ int ret;
+
+ ret = get_user(id, uprop);
+ if (ret)
+ return ret;
+
+ ret = get_user(value, uprop + 1);
+ if (ret)
+ return ret;
+
+ switch ((enum drm_i915_perf_property_id)id) {
+ case DRM_I915_PERF_PROP_CTX_HANDLE:
+ props->single_context = 1;
+ props->ctx_handle = value;
+ break;
+ case DRM_I915_PERF_PROP_SAMPLE_OA:
+ props->sample_flags |= SAMPLE_OA_REPORT;
+ break;
+ case DRM_I915_PERF_PROP_OA_METRICS_SET:
+ if (value == 0 ||
+ value > dev_priv->perf.oa.n_builtin_sets) {
+ DRM_DEBUG("Unknown OA metric set ID\n");
+ return -EINVAL;
+ }
+ props->metrics_set = value;
+ break;
+ case DRM_I915_PERF_PROP_OA_FORMAT:
+ if (value == 0 || value >= I915_OA_FORMAT_MAX) {
+ DRM_DEBUG("Invalid OA report format\n");
+ return -EINVAL;
+ }
+ if (!dev_priv->perf.oa.oa_formats[value].size) {
+ DRM_DEBUG("Invalid OA report format\n");
+ return -EINVAL;
+ }
+ props->oa_format = value;
+ break;
+ case DRM_I915_PERF_PROP_OA_EXPONENT:
+ if (value > OA_EXPONENT_MAX) {
+ DRM_DEBUG("OA timer exponent too high (> %u)\n",
+ OA_EXPONENT_MAX);
+ return -EINVAL;
+ }
+
+ /* Theoretically we can program the OA unit to sample
+ * every 160ns but don't allow that by default unless
+ * root.
+ *
+ * On Haswell the period is derived from the exponent
+ * as:
+ *
+ * period = 80ns * 2^(exponent + 1)
+ */
+ BUILD_BUG_ON(sizeof(oa_period) != 8);
+ oa_period = 80ull * (2ull << value);
+
+ /* This check is primarily to ensure that oa_period <=
+ * UINT32_MAX (before passing to do_div which only
+ * accepts a u32 denominator), but we can also skip
+ * checking anything < 1Hz which implicitly can't be
+ * limited via an integer oa_max_sample_rate.
+ */
+ if (oa_period <= NSEC_PER_SEC) {
+ u64 tmp = NSEC_PER_SEC;
+ do_div(tmp, oa_period);
+ oa_freq_hz = tmp;
+ } else
+ oa_freq_hz = 0;
+
+ if (oa_freq_hz > i915_oa_max_sample_rate &&
+ !capable(CAP_SYS_ADMIN)) {
+ DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
+ i915_oa_max_sample_rate);
+ return -EACCES;
+ }
+
+ props->oa_periodic = true;
+ props->oa_period_exponent = value;
+ break;
+ default:
+ MISSING_CASE(id);
+ DRM_DEBUG("Unknown i915 perf property ID\n");
+ return -EINVAL;
+ }
+
+ uprop += 2;
+ }
+
+ return 0;
+}
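
To make the exponent validation above concrete, here is the Haswell period/frequency arithmetic as plain userspace C (64-bit division is fine outside the kernel, so no do_div() is needed); e.g. exponent 0 gives 160ns (~6.25 MHz) and exponent 16 gives ~10.5ms (~95 Hz):

    #include <stdint.h>

    /* period = 80ns * 2^(exponent + 1), as derived above for Haswell */
    static uint64_t oa_exponent_to_period_ns(uint64_t exponent)
    {
            return 80ull * (2ull << exponent);
    }

    /* 0 means "slower than 1Hz", mirroring the kernel's oa_freq_hz = 0 */
    static uint64_t oa_period_to_freq_hz(uint64_t period_ns)
    {
            return period_ns <= 1000000000ull ?
                   1000000000ull / period_ns : 0;
    }
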
+
+/**
+ * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
+ * @dev: drm device
+ * @data: ioctl data copied from userspace (unvalidated)
+ * @file: drm file
+ *
+ * Validates the stream open parameters given by userspace including flags
+ * and an array of u64 key, value pair properties.
+ *
+ * Very little is assumed up front about the nature of the stream being
+ * opened (for instance we don't assume it's for periodic OA unit metrics). An
+ * i915-perf stream is expected to be a suitable interface for other forms of
+ * buffered data written by the GPU besides periodic OA metrics.
+ *
+ * Note we copy the properties from userspace outside of the i915 perf
+ * mutex to avoid an awkward lockdep dependency with mmap_sem.
+ *
+ * Most of the implementation details are handled by
+ * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
+ * mutex for serializing with any non-file-operation driver hooks.
+ *
+ * Returns: A newly opened i915 perf stream file descriptor or negative
+ * error code on failure.
+ */
+int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_perf_open_param *param = data;
+ struct perf_open_properties props;
+ u32 known_open_flags;
+ int ret;
+
+ if (!dev_priv->perf.initialized) {
+ DRM_DEBUG("i915 perf interface not available for this system\n");
+ return -ENOTSUPP;
+ }
+
+ known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
+ I915_PERF_FLAG_FD_NONBLOCK |
+ I915_PERF_FLAG_DISABLED;
+ if (param->flags & ~known_open_flags) {
+ DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
+ return -EINVAL;
+ }
+
+ ret = read_properties_unlocked(dev_priv,
+ u64_to_user_ptr(param->properties_ptr),
+ param->num_properties,
+ &props);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
+ mutex_unlock(&dev_priv->perf.lock);
+
+ return ret;
+}
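
Putting the pieces together, opening a periodic system-wide OA stream from userspace looks roughly like this (a sketch assuming libdrm's drmIoctl() and the property/flag names this series adds to i915_drm.h; the metric set ID comes from the sysfs directory registered below):

    #include <stdint.h>
    #include <xf86drm.h>		/* drmIoctl(), from libdrm */
    #include <drm/i915_drm.h>

    /* Returns a new stream fd (>= 0) or a negative error code */
    static int open_oa_stream(int drm_fd, uint64_t metrics_set_id,
                              uint64_t oa_exponent)
    {
            uint64_t properties[] = {
                    /* Include raw OA reports in samples */
                    DRM_I915_PERF_PROP_SAMPLE_OA, 1,
                    /* Which counter configuration to use (from sysfs) */
                    DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
                    /* A report layout valid for Haswell */
                    DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
                    /* Sampling period = 80ns * 2^(oa_exponent + 1) */
                    DRM_I915_PERF_PROP_OA_EXPONENT, oa_exponent,
            };
            struct drm_i915_perf_open_param param = {
                    .flags = I915_PERF_FLAG_FD_CLOEXEC,
                    .num_properties = sizeof(properties) /
                                      (2 * sizeof(uint64_t)),
                    .properties_ptr = (uintptr_t)properties,
            };

            return drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
    }
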
+
+/**
+ * i915_perf_register - exposes i915-perf to userspace
+ * @dev_priv: i915 device instance
+ *
+ * In particular OA metric sets are advertised under a sysfs metrics/
+ * directory allowing userspace to enumerate valid IDs that can be
+ * used to open an i915-perf stream.
+ */
+void i915_perf_register(struct drm_i915_private *dev_priv)
+{
+ if (!IS_HASWELL(dev_priv))
+ return;
+
+ if (!dev_priv->perf.initialized)
+ return;
+
+ /* Take perf.lock to be sure we're synchronized with any attempted
+ * i915_perf_open_ioctl(), considering that we register after
+ * being exposed to userspace.
+ */
+ mutex_lock(&dev_priv->perf.lock);
+
+ dev_priv->perf.metrics_kobj =
+ kobject_create_and_add("metrics",
+ &dev_priv->drm.primary->kdev->kobj);
+ if (!dev_priv->perf.metrics_kobj)
+ goto exit;
+
+ if (i915_perf_register_sysfs_hsw(dev_priv)) {
+ kobject_put(dev_priv->perf.metrics_kobj);
+ dev_priv->perf.metrics_kobj = NULL;
+ }
+
+exit:
+ mutex_unlock(&dev_priv->perf.lock);
+}
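
Userspace discovers valid DRM_I915_PERF_PROP_OA_METRICS_SET values by walking that metrics/ directory. A sketch, assuming the layout registered by i915_perf_register_sysfs_hsw() is metrics/<config-guid>/id under the card's device directory (the exact path is an assumption here):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* E.g. read_metric_set_id("card0", "<config-guid>", &id); the
     * resulting id is what DRM_I915_PERF_PROP_OA_METRICS_SET expects.
     */
    static int read_metric_set_id(const char *card, const char *guid,
                                  uint64_t *id)
    {
            char path[256];
            FILE *f;
            int ok;

            snprintf(path, sizeof(path),
                     "/sys/class/drm/%s/metrics/%s/id", card, guid);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            ok = fscanf(f, "%" SCNu64, id) == 1;
            fclose(f);
            return ok ? 0 : -1;
    }
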
+
+/**
+ * i915_perf_unregister - hide i915-perf from userspace
+ * @dev_priv: i915 device instance
+ *
+ * i915-perf state cleanup is split up into an 'unregister' and
+ * 'deinit' phase where the interface is first hidden from
+ * userspace by i915_perf_unregister() before cleaning up
+ * remaining state in i915_perf_fini().
+ */
+void i915_perf_unregister(struct drm_i915_private *dev_priv)
+{
+ if (!IS_HASWELL(dev_priv))
+ return;
+
+ if (!dev_priv->perf.metrics_kobj)
+ return;
+
+ i915_perf_unregister_sysfs_hsw(dev_priv);
+
+ kobject_put(dev_priv->perf.metrics_kobj);
+ dev_priv->perf.metrics_kobj = NULL;
+}
+
+static struct ctl_table oa_table[] = {
+ {
+ .procname = "perf_stream_paranoid",
+ .data = &i915_perf_stream_paranoid,
+ .maxlen = sizeof(i915_perf_stream_paranoid),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "oa_max_sample_rate",
+ .data = &i915_oa_max_sample_rate,
+ .maxlen = sizeof(i915_oa_max_sample_rate),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &oa_sample_rate_hard_limit,
+ },
+ {}
+};
+
+static struct ctl_table i915_root[] = {
+ {
+ .procname = "i915",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = oa_table,
+ },
+ {}
+};
+
+static struct ctl_table dev_root[] = {
+ {
+ .procname = "dev",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = i915_root,
+ },
+ {}
+};
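
Chaining dev_root -> i915_root -> oa_table surfaces the two knobs as /proc/sys/dev/i915/perf_stream_paranoid (0 or 1) and /proc/sys/dev/i915/oa_max_sample_rate (in Hz, clamped to the hard limit). A small helper to read them back (sketch):

    #include <stdio.h>

    /* Reads e.g. "perf_stream_paranoid" or "oa_max_sample_rate" */
    static int read_i915_sysctl(const char *name, int *value)
    {
            char path[128];
            FILE *f;
            int ok;

            snprintf(path, sizeof(path), "/proc/sys/dev/i915/%s", name);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            ok = fscanf(f, "%d", value) == 1;
            fclose(f);
            return ok ? 0 : -1;
    }
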
+
+/**
+ * i915_perf_init - initialize i915-perf state on module load
+ * @dev_priv: i915 device instance
+ *
+ * Initializes i915-perf state without exposing anything to userspace.
+ *
+ * Note: i915-perf initialization is split into an 'init' and 'register'
+ * phase, with i915_perf_register() exposing state to userspace.
+ */
+void i915_perf_init(struct drm_i915_private *dev_priv)
+{
+ if (!IS_HASWELL(dev_priv))
+ return;
+
+ hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
+ init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
+
+ INIT_LIST_HEAD(&dev_priv->perf.streams);
+ mutex_init(&dev_priv->perf.lock);
+ spin_lock_init(&dev_priv->perf.hook_lock);
+
+ dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
+ dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
+ dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
+ dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
+ dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
+ dev_priv->perf.oa.ops.read = gen7_oa_read;
+ dev_priv->perf.oa.ops.oa_buffer_is_empty =
+ gen7_oa_buffer_is_empty_fop_unlocked;
+
+ dev_priv->perf.oa.timestamp_frequency = 12500000;
+
+ dev_priv->perf.oa.oa_formats = hsw_oa_formats;
+
+ dev_priv->perf.oa.n_builtin_sets =
+ i915_oa_n_builtin_metric_sets_hsw;
+
+ dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+
+ dev_priv->perf.initialized = true;
+}
+
+/**
+ * i915_perf_fini - counterpart to i915_perf_init()
+ * @dev_priv: i915 device instance
+ */
+void i915_perf_fini(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->perf.initialized)
+ return;
+
+ unregister_sysctl_table(dev_priv->perf.sysctl_header);
+
+ memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
+ dev_priv->perf.initialized = false;
+}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f15a3dc6d98..1c8f5b9a7fcd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -62,6 +62,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
(port) == PORT_B ? (b) : (c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
+#define _PHY3(phy, a, b, c) ((phy) == DPIO_PHY0 ? (a) : \
+ (phy) == DPIO_PHY1 ? (b) : (c))
+#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
@@ -107,6 +110,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GRDOM_RESET_STATUS (1 << 1)
#define GRDOM_RESET_ENABLE (1 << 0)
+/* BSpec only has register offset, PCI device and bit found empirically */
+#define I830_CLOCK_GATE 0xc8 /* device 0 */
+#define I830_L2_CACHE_CLOCK_GATE_DISABLE (1 << 2)
+
#define GCDGMBUS 0xcc
#define GCFGC2 0xda
@@ -294,7 +301,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Instruction field definitions used by the command parser
*/
#define INSTR_CLIENT_SHIFT 29
-#define INSTR_CLIENT_MASK 0xE0000000
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
#define INSTR_RC_CLIENT 0x3
@@ -615,7 +621,344 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8)
#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4)
-#define OACONTROL _MMIO(0x2360)
+#define GEN7_OACONTROL _MMIO(0x2360)
+#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000
+#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F
+#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6
+#define GEN7_OACONTROL_TIMER_ENABLE (1<<5)
+#define GEN7_OACONTROL_FORMAT_A13 (0<<2)
+#define GEN7_OACONTROL_FORMAT_A29 (1<<2)
+#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2)
+#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2)
+#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2)
+#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2)
+#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2)
+#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2)
+#define GEN7_OACONTROL_FORMAT_SHIFT 2
+#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1)
+#define GEN7_OACONTROL_ENABLE (1<<0)
+
+#define GEN8_OACTXID _MMIO(0x2364)
+
+#define GEN8_OACONTROL _MMIO(0x2B00)
+#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
+#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
+#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2)
+#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2)
+#define GEN8_OA_REPORT_FORMAT_SHIFT 2
+#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1)
+#define GEN8_OA_COUNTER_ENABLE (1<<0)
+
+#define GEN8_OACTXCONTROL _MMIO(0x2360)
+#define GEN8_OA_TIMER_PERIOD_MASK 0x3F
+#define GEN8_OA_TIMER_PERIOD_SHIFT 2
+#define GEN8_OA_TIMER_ENABLE (1<<1)
+#define GEN8_OA_COUNTER_RESUME (1<<0)
+
+#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
+#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3)
+#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2)
+#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
+#define GEN7_OABUFFER_RESUME (1<<0)
+
+#define GEN8_OABUFFER _MMIO(0x2b14)
+
+#define GEN7_OASTATUS1 _MMIO(0x2364)
+#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0
+#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2)
+#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1)
+#define GEN7_OASTATUS1_REPORT_LOST (1<<0)
+
+#define GEN7_OASTATUS2 _MMIO(0x2368)
+#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0
+
+#define GEN8_OASTATUS _MMIO(0x2b08)
+#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3)
+#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2)
+#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1)
+#define GEN8_OASTATUS_REPORT_LOST (1<<0)
+
+#define GEN8_OAHEADPTR _MMIO(0x2B0C)
+#define GEN8_OATAILPTR _MMIO(0x2B10)
+
+#define OABUFFER_SIZE_128K (0<<3)
+#define OABUFFER_SIZE_256K (1<<3)
+#define OABUFFER_SIZE_512K (2<<3)
+#define OABUFFER_SIZE_1M (3<<3)
+#define OABUFFER_SIZE_2M (4<<3)
+#define OABUFFER_SIZE_4M (5<<3)
+#define OABUFFER_SIZE_8M (6<<3)
+#define OABUFFER_SIZE_16M (7<<3)
+
+#define OA_MEM_SELECT_GGTT (1<<0)
+
+#define EU_PERF_CNTL0 _MMIO(0xe458)
+
+#define GDT_CHICKEN_BITS _MMIO(0x9840)
+#define GT_NOA_ENABLE 0x00000080
+
+/*
+ * OA Boolean state
+ */
+
+#define OAREPORTTRIG1 _MMIO(0x2740)
+#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
+#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+
+#define OAREPORTTRIG2 _MMIO(0x2744)
+#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
+#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
+#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
+#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
+#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
+#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
+#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
+#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
+#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
+#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
+#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
+#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
+#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
+#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
+#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
+#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
+#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
+#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
+#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
+#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
+#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
+#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
+#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+
+#define OAREPORTTRIG3 _MMIO(0x2748)
+#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0
+#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4
+#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8
+#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12
+#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16
+#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20
+#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24
+#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28
+
+#define OAREPORTTRIG4 _MMIO(0x274c)
+#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0
+#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4
+#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8
+#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12
+#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16
+#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20
+#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24
+#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28
+
+#define OAREPORTTRIG5 _MMIO(0x2750)
+#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
+#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+
+#define OAREPORTTRIG6 _MMIO(0x2754)
+#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
+#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
+#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
+#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
+#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
+#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
+#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
+#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
+#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
+#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
+#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
+#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
+#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
+#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
+#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
+#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
+#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
+#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
+#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
+#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
+#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
+#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
+#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+
+#define OAREPORTTRIG7 _MMIO(0x2758)
+#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0
+#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4
+#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8
+#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12
+#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16
+#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20
+#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24
+#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28
+
+#define OAREPORTTRIG8 _MMIO(0x275c)
+#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0
+#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4
+#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8
+#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12
+#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16
+#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20
+#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
+#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
+
+#define OASTARTTRIG1 _MMIO(0x2710)
+#define OASTARTTRIG1_THRESHOLD_COUNT_MASK_MBZ 0xffff0000
+#define OASTARTTRIG1_THRESHOLD_MASK 0xffff
+
+#define OASTARTTRIG2 _MMIO(0x2714)
+#define OASTARTTRIG2_INVERT_A_0 (1<<0)
+#define OASTARTTRIG2_INVERT_A_1 (1<<1)
+#define OASTARTTRIG2_INVERT_A_2 (1<<2)
+#define OASTARTTRIG2_INVERT_A_3 (1<<3)
+#define OASTARTTRIG2_INVERT_A_4 (1<<4)
+#define OASTARTTRIG2_INVERT_A_5 (1<<5)
+#define OASTARTTRIG2_INVERT_A_6 (1<<6)
+#define OASTARTTRIG2_INVERT_A_7 (1<<7)
+#define OASTARTTRIG2_INVERT_A_8 (1<<8)
+#define OASTARTTRIG2_INVERT_A_9 (1<<9)
+#define OASTARTTRIG2_INVERT_A_10 (1<<10)
+#define OASTARTTRIG2_INVERT_A_11 (1<<11)
+#define OASTARTTRIG2_INVERT_A_12 (1<<12)
+#define OASTARTTRIG2_INVERT_A_13 (1<<13)
+#define OASTARTTRIG2_INVERT_A_14 (1<<14)
+#define OASTARTTRIG2_INVERT_A_15 (1<<15)
+#define OASTARTTRIG2_INVERT_B_0 (1<<16)
+#define OASTARTTRIG2_INVERT_B_1 (1<<17)
+#define OASTARTTRIG2_INVERT_B_2 (1<<18)
+#define OASTARTTRIG2_INVERT_B_3 (1<<19)
+#define OASTARTTRIG2_INVERT_C_0 (1<<20)
+#define OASTARTTRIG2_INVERT_C_1 (1<<21)
+#define OASTARTTRIG2_INVERT_D_0 (1<<22)
+#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23)
+#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24)
+#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28)
+#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29)
+#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30)
+#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31)
+
+#define OASTARTTRIG3 _MMIO(0x2718)
+#define OASTARTTRIG3_NOA_SELECT_MASK 0xf
+#define OASTARTTRIG3_NOA_SELECT_8_SHIFT 0
+#define OASTARTTRIG3_NOA_SELECT_9_SHIFT 4
+#define OASTARTTRIG3_NOA_SELECT_10_SHIFT 8
+#define OASTARTTRIG3_NOA_SELECT_11_SHIFT 12
+#define OASTARTTRIG3_NOA_SELECT_12_SHIFT 16
+#define OASTARTTRIG3_NOA_SELECT_13_SHIFT 20
+#define OASTARTTRIG3_NOA_SELECT_14_SHIFT 24
+#define OASTARTTRIG3_NOA_SELECT_15_SHIFT 28
+
+#define OASTARTTRIG4 _MMIO(0x271c)
+#define OASTARTTRIG4_NOA_SELECT_MASK 0xf
+#define OASTARTTRIG4_NOA_SELECT_0_SHIFT 0
+#define OASTARTTRIG4_NOA_SELECT_1_SHIFT 4
+#define OASTARTTRIG4_NOA_SELECT_2_SHIFT 8
+#define OASTARTTRIG4_NOA_SELECT_3_SHIFT 12
+#define OASTARTTRIG4_NOA_SELECT_4_SHIFT 16
+#define OASTARTTRIG4_NOA_SELECT_5_SHIFT 20
+#define OASTARTTRIG4_NOA_SELECT_6_SHIFT 24
+#define OASTARTTRIG4_NOA_SELECT_7_SHIFT 28
+
+#define OASTARTTRIG5 _MMIO(0x2720)
+#define OASTARTTRIG5_THRESHOLD_COUNT_MASK_MBZ 0xffff0000
+#define OASTARTTRIG5_THRESHOLD_MASK 0xffff
+
+#define OASTARTTRIG6 _MMIO(0x2724)
+#define OASTARTTRIG6_INVERT_A_0 (1<<0)
+#define OASTARTTRIG6_INVERT_A_1 (1<<1)
+#define OASTARTTRIG6_INVERT_A_2 (1<<2)
+#define OASTARTTRIG6_INVERT_A_3 (1<<3)
+#define OASTARTTRIG6_INVERT_A_4 (1<<4)
+#define OASTARTTRIG6_INVERT_A_5 (1<<5)
+#define OASTARTTRIG6_INVERT_A_6 (1<<6)
+#define OASTARTTRIG6_INVERT_A_7 (1<<7)
+#define OASTARTTRIG6_INVERT_A_8 (1<<8)
+#define OASTARTTRIG6_INVERT_A_9 (1<<9)
+#define OASTARTTRIG6_INVERT_A_10 (1<<10)
+#define OASTARTTRIG6_INVERT_A_11 (1<<11)
+#define OASTARTTRIG6_INVERT_A_12 (1<<12)
+#define OASTARTTRIG6_INVERT_A_13 (1<<13)
+#define OASTARTTRIG6_INVERT_A_14 (1<<14)
+#define OASTARTTRIG6_INVERT_A_15 (1<<15)
+#define OASTARTTRIG6_INVERT_B_0 (1<<16)
+#define OASTARTTRIG6_INVERT_B_1 (1<<17)
+#define OASTARTTRIG6_INVERT_B_2 (1<<18)
+#define OASTARTTRIG6_INVERT_B_3 (1<<19)
+#define OASTARTTRIG6_INVERT_C_0 (1<<20)
+#define OASTARTTRIG6_INVERT_C_1 (1<<21)
+#define OASTARTTRIG6_INVERT_D_0 (1<<22)
+#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23)
+#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24)
+#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28)
+#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29)
+#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30)
+#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31)
+
+#define OASTARTTRIG7 _MMIO(0x2728)
+#define OASTARTTRIG7_NOA_SELECT_MASK 0xf
+#define OASTARTTRIG7_NOA_SELECT_8_SHIFT 0
+#define OASTARTTRIG7_NOA_SELECT_9_SHIFT 4
+#define OASTARTTRIG7_NOA_SELECT_10_SHIFT 8
+#define OASTARTTRIG7_NOA_SELECT_11_SHIFT 12
+#define OASTARTTRIG7_NOA_SELECT_12_SHIFT 16
+#define OASTARTTRIG7_NOA_SELECT_13_SHIFT 20
+#define OASTARTTRIG7_NOA_SELECT_14_SHIFT 24
+#define OASTARTTRIG7_NOA_SELECT_15_SHIFT 28
+
+#define OASTARTTRIG8 _MMIO(0x272c)
+#define OASTARTTRIG8_NOA_SELECT_MASK 0xf
+#define OASTARTTRIG8_NOA_SELECT_0_SHIFT 0
+#define OASTARTTRIG8_NOA_SELECT_1_SHIFT 4
+#define OASTARTTRIG8_NOA_SELECT_2_SHIFT 8
+#define OASTARTTRIG8_NOA_SELECT_3_SHIFT 12
+#define OASTARTTRIG8_NOA_SELECT_4_SHIFT 16
+#define OASTARTTRIG8_NOA_SELECT_5_SHIFT 20
+#define OASTARTTRIG8_NOA_SELECT_6_SHIFT 24
+#define OASTARTTRIG8_NOA_SELECT_7_SHIFT 28
+
+/* CECX_0 */
+#define OACEC_COMPARE_LESS_OR_EQUAL 6
+#define OACEC_COMPARE_NOT_EQUAL 5
+#define OACEC_COMPARE_LESS_THAN 4
+#define OACEC_COMPARE_GREATER_OR_EQUAL 3
+#define OACEC_COMPARE_EQUAL 2
+#define OACEC_COMPARE_GREATER_THAN 1
+#define OACEC_COMPARE_ANY_EQUAL 0
+
+#define OACEC_COMPARE_VALUE_MASK 0xffff
+#define OACEC_COMPARE_VALUE_SHIFT 3
+
+#define OACEC_SELECT_NOA (0<<19)
+#define OACEC_SELECT_PREV (1<<19)
+#define OACEC_SELECT_BOOLEAN (2<<19)
+
+/* CECX_1 */
+#define OACEC_MASK_MASK 0xffff
+#define OACEC_CONSIDERATIONS_MASK 0xffff
+#define OACEC_CONSIDERATIONS_SHIFT 16
+
+#define OACEC0_0 _MMIO(0x2770)
+#define OACEC0_1 _MMIO(0x2774)
+#define OACEC1_0 _MMIO(0x2778)
+#define OACEC1_1 _MMIO(0x277c)
+#define OACEC2_0 _MMIO(0x2780)
+#define OACEC2_1 _MMIO(0x2784)
+#define OACEC3_0 _MMIO(0x2788)
+#define OACEC3_1 _MMIO(0x278c)
+#define OACEC4_0 _MMIO(0x2790)
+#define OACEC4_1 _MMIO(0x2794)
+#define OACEC5_0 _MMIO(0x2798)
+#define OACEC5_1 _MMIO(0x279c)
+#define OACEC6_0 _MMIO(0x27a0)
+#define OACEC6_1 _MMIO(0x27a4)
+#define OACEC7_0 _MMIO(0x27a8)
+#define OACEC7_1 _MMIO(0x27ac)
+
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
@@ -708,9 +1051,15 @@ enum skl_disp_power_wells {
/* These numbers are fixed and must match the position of the pw bits */
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_DDI_A_E,
+ GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
SKL_DISP_PW_DDI_B,
SKL_DISP_PW_DDI_C,
SKL_DISP_PW_DDI_D,
+
+ GLK_DISP_PW_AUX_A = 8,
+ GLK_DISP_PW_AUX_B,
+ GLK_DISP_PW_AUX_C,
+
SKL_DISP_PW_1 = 14,
SKL_DISP_PW_2,
@@ -720,6 +1069,7 @@ enum skl_disp_power_wells {
BXT_DPIO_CMN_A,
BXT_DPIO_CMN_BC,
+ GLK_DPIO_CMN_C,
};
#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -1188,8 +1538,10 @@ enum skl_disp_power_wells {
/* BXT PHY registers */
#define _BXT_PHY0_BASE 0x6C000
#define _BXT_PHY1_BASE 0x162000
-#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \
- _BXT_PHY1_BASE)
+#define _BXT_PHY2_BASE 0x163000
+#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE, \
+ _BXT_PHY2_BASE)
#define _BXT_PHY(phy, reg) \
_MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
@@ -1201,7 +1553,6 @@ enum skl_disp_power_wells {
_MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
-#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
#define _BXT_PHY_CTL_DDI_A 0x64C00
#define _BXT_PHY_CTL_DDI_B 0x64C10
@@ -1214,9 +1565,11 @@ enum skl_disp_power_wells {
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP)
+#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, \
+ _PHY_CTL_FAMILY_DDI_C)
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1225,6 +1578,8 @@ enum skl_disp_power_wells {
#define PORT_PLL_ENABLE (1 << 31)
#define PORT_PLL_LOCK (1 << 30)
#define PORT_PLL_REF_SEL (1 << 27)
+#define PORT_PLL_POWER_ENABLE (1 << 26)
+#define PORT_PLL_POWER_STATE (1 << 25)
#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
#define _PORT_PLL_EBB_0_A 0x162034
@@ -1435,6 +1790,21 @@ enum skl_disp_power_wells {
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
+#define _PORT_TX_DW5_LN0_A 0x162514
+#define _PORT_TX_DW5_LN0_B 0x6C514
+#define _PORT_TX_DW5_LN0_C 0x6C914
+#define _PORT_TX_DW5_GRP_A 0x162D14
+#define _PORT_TX_DW5_GRP_B 0x6CD14
+#define _PORT_TX_DW5_GRP_C 0x6CF14
+#define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW5_LN0_B, \
+ _PORT_TX_DW5_LN0_C)
+#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW5_GRP_B, \
+ _PORT_TX_DW5_GRP_C)
+#define DCC_DELAY_RANGE_1 (1 << 9)
+#define DCC_DELAY_RANGE_2 (1 << 8)
+
#define _PORT_TX_DW14_LN0_A 0x162538
#define _PORT_TX_DW14_LN0_B 0x6C538
#define _PORT_TX_DW14_LN0_C 0x6C938
@@ -2936,7 +3306,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
- (IS_BROXTON(dev_priv) ? \
+ (IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_US(us) : \
INTERVAL_1_33_US(us)) : \
INTERVAL_1_28_US(us))
@@ -2945,7 +3315,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
- (IS_BROXTON(dev_priv) ? \
+ (IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_TO_US(interval) : \
INTERVAL_1_33_TO_US(interval)) : \
INTERVAL_1_28_TO_US(interval))
@@ -2953,8 +3323,10 @@ enum skl_disp_power_wells {
/*
* Logical Context regs
*/
-#define CCID _MMIO(0x2180)
-#define CCID_EN (1<<0)
+#define CCID _MMIO(0x2180)
+#define CCID_EN BIT(0)
+#define CCID_EXTENDED_STATE_RESTORE BIT(2)
+#define CCID_EXTENDED_STATE_SAVE BIT(3)
/*
* Notes on SNB/IVB/VLV context size:
* - Power context is saved elsewhere (LLC or stolen)
@@ -3243,9 +3615,12 @@ enum {
#define EDP_PSR_PERF_CNT_MASK 0xffffff
#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60)
-#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
-#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
-#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
+#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
+#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15)
#define EDP_PSR2_CTL _MMIO(0x6f900)
#define EDP_PSR2_ENABLE (1<<31)
@@ -3260,6 +3635,11 @@ enum {
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
#define EDP_PSR2_IDLE_MASK 0xf
+#define EDP_FRAMES_BEFORE_SU_ENTRY (1<<4)
+
+#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940)
+#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_SHIFT 28
/* VGA port control */
#define ADPA _MMIO(0x61100)
@@ -5390,18 +5770,21 @@ enum {
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
-#define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
-#define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
-#define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
-#define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
-#define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
-#define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
-#define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
-#define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
-#define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
-#define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
-#define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
-#define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
+#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+
+#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
/*
* CHV pipe B sprite CSC
@@ -5410,29 +5793,32 @@ enum {
* |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
* |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff|
*/
-#define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
-#define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
-#define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
+#define _MMIO_CHV_SPCSC(plane_id, reg) \
+ _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))
+
+#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
+#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
+#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
-#define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
-#define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
-#define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
-#define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
-#define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
+#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
+#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
+#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
+#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
+#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
-#define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
-#define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
-#define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
+#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
+#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
+#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
-#define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
-#define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
-#define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
+#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
+#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
+#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
@@ -6086,6 +6472,12 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+#define CHICKEN_TRANS_A 0x420c0
+#define CHICKEN_TRANS_B 0x420c4
+#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
+#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
+
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
@@ -6930,6 +7322,7 @@ enum {
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define GEN6_UCGCTL3 _MMIO(0x9408)
+# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
@@ -8315,6 +8708,21 @@ enum {
#define BXT_PIPE_SELECT_SHIFT 7
#define BXT_PIPE_SELECT_MASK (7 << 7)
#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
+#define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */
+#define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */
+#define GLK_MIPIIO_RESET_RELEASED (1 << 28)
+#define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */
+#define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */
+#define GLK_LP_WAKE (1 << 22)
+#define GLK_LP11_LOW_PWR_MODE (1 << 21)
+#define GLK_LP00_LOW_PWR_MODE (1 << 20)
+#define GLK_FIREWALL_ENABLE (1 << 16)
+#define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10)
+#define BXT_PIXEL_OVERLAP_CNT_SHIFT 10
+#define BXT_DSC_ENABLE (1 << 3)
+#define BXT_RGB_FLIP (1 << 2)
+#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
+#define GLK_MIPIIO_ENABLE (1 << 0)
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index b0e1e7ca75da..5c86925a0294 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -56,13 +56,12 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
i915_redisable_vga(dev_priv);
}
-int i915_save_state(struct drm_device *dev)
+int i915_save_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
i915_save_display(dev_priv);
@@ -97,18 +96,17 @@ int i915_save_state(struct drm_device *dev)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
}
-int i915_restore_state(struct drm_device *dev)
+int i915_restore_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_restore_fences(dev_priv);
@@ -145,9 +143,9 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_i2c_reset(dev);
+ intel_i2c_reset(dev_priv);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 147420ccf49c..40f4e5efaf83 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -17,6 +17,93 @@
static DEFINE_SPINLOCK(i915_sw_fence_lock);
+enum {
+ DEBUG_FENCE_IDLE = 0,
+ DEBUG_FENCE_NOTIFY,
+};
+
+#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
+
+static void *i915_sw_fence_debug_hint(void *addr)
+{
+ return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
+}
+
+static struct debug_obj_descr i915_sw_fence_debug_descr = {
+ .name = "i915_sw_fence",
+ .debug_hint = i915_sw_fence_debug_hint,
+};
+
+static inline void debug_fence_init(struct i915_sw_fence *fence)
+{
+ debug_object_init(fence, &i915_sw_fence_debug_descr);
+}
+
+static inline void debug_fence_activate(struct i915_sw_fence *fence)
+{
+ debug_object_activate(fence, &i915_sw_fence_debug_descr);
+}
+
+static inline void debug_fence_set_state(struct i915_sw_fence *fence,
+ int old, int new)
+{
+ debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
+}
+
+static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
+{
+ debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
+}
+
+static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+{
+ debug_object_destroy(fence, &i915_sw_fence_debug_descr);
+}
+
+static inline void debug_fence_free(struct i915_sw_fence *fence)
+{
+ debug_object_free(fence, &i915_sw_fence_debug_descr);
+ smp_wmb(); /* flush the change in state before reallocation */
+}
+
+static inline void debug_fence_assert(struct i915_sw_fence *fence)
+{
+ debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
+}
+
+#else
+
+static inline void debug_fence_init(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_activate(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_set_state(struct i915_sw_fence *fence,
+ int old, int new)
+{
+}
+
+static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_free(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_assert(struct i915_sw_fence *fence)
+{
+}
+
+#endif
+
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
{
@@ -26,25 +113,37 @@ static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
return fn(fence, state);
}
-static void i915_sw_fence_free(struct kref *kref)
+#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
+void i915_sw_fence_fini(struct i915_sw_fence *fence)
+{
+ debug_fence_free(fence);
+}
+#endif
+
+static void i915_sw_fence_release(struct kref *kref)
{
struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);
WARN_ON(atomic_read(&fence->pending) > 0);
+ debug_fence_destroy(fence);
- if (fence->flags & I915_SW_FENCE_MASK)
+ if (fence->flags & I915_SW_FENCE_MASK) {
__i915_sw_fence_notify(fence, FENCE_FREE);
- else
+ } else {
+ i915_sw_fence_fini(fence);
kfree(fence);
+ }
}
static void i915_sw_fence_put(struct i915_sw_fence *fence)
{
- kref_put(&fence->kref, i915_sw_fence_free);
+ debug_fence_assert(fence);
+ kref_put(&fence->kref, i915_sw_fence_release);
}
static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
{
+ debug_fence_assert(fence);
kref_get(&fence->kref);
return fence;
}
@@ -56,6 +155,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
wait_queue_t *pos, *next;
unsigned long flags;
+ debug_fence_deactivate(fence);
atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
/*
@@ -88,23 +188,33 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
} while (1);
}
spin_unlock_irqrestore(&x->lock, flags);
+
+ debug_fence_assert(fence);
}
static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
struct list_head *continuation)
{
+ debug_fence_assert(fence);
+
if (!atomic_dec_and_test(&fence->pending))
return;
+ debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);
+
if (fence->flags & I915_SW_FENCE_MASK &&
__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
return;
+ debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);
+
__i915_sw_fence_wake_up_all(fence, continuation);
}
static void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
+ debug_fence_assert(fence);
+
if (WARN_ON(i915_sw_fence_done(fence)))
return;
@@ -113,6 +223,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence)
static void i915_sw_fence_await(struct i915_sw_fence *fence)
{
+ debug_fence_assert(fence);
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
@@ -123,18 +234,26 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
{
BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
+ debug_fence_init(fence);
+
__init_waitqueue_head(&fence->wait, name, key);
kref_init(&fence->kref);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)fn;
}
-void i915_sw_fence_commit(struct i915_sw_fence *fence)
+static void __i915_sw_fence_commit(struct i915_sw_fence *fence)
{
i915_sw_fence_complete(fence);
i915_sw_fence_put(fence);
}
+void i915_sw_fence_commit(struct i915_sw_fence *fence)
+{
+ debug_fence_activate(fence);
+ __i915_sw_fence_commit(fence);
+}
+
static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
{
list_del(&wq->task_list);
@@ -206,9 +325,13 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
unsigned long flags;
int pending;
+ debug_fence_assert(fence);
+
if (i915_sw_fence_done(signaler))
return 0;
+ debug_fence_assert(signaler);
+
/* The dependency graph must be acyclic. */
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
@@ -279,7 +402,7 @@ static void timer_i915_sw_fence_wake(unsigned long data)
dma_fence_put(cb->dma);
cb->dma = NULL;
- i915_sw_fence_commit(cb->fence);
+ __i915_sw_fence_commit(cb->fence);
cb->timer.function = NULL;
}
@@ -290,7 +413,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
del_timer_sync(&cb->timer);
if (cb->timer.function)
- i915_sw_fence_commit(cb->fence);
+ __i915_sw_fence_commit(cb->fence);
dma_fence_put(cb->dma);
kfree(cb);
@@ -304,6 +427,8 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct i915_sw_dma_fence_cb *cb;
int ret;
+ debug_fence_assert(fence);
+
if (dma_fence_is_signaled(dma))
return 0;
@@ -349,6 +474,8 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence *excl;
int ret = 0, pending;
+ debug_fence_assert(fence);
+
if (write) {
struct dma_fence **shared;
unsigned int count, i;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 0f3185ef7f4e..d31cefbbcc04 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -56,6 +56,12 @@ do { \
__i915_sw_fence_init((fence), (fn), NULL, NULL)
#endif
+#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
+void i915_sw_fence_fini(struct i915_sw_fence *fence);
+#else
+static inline void i915_sw_fence_fini(struct i915_sw_fence *fence) {}
+#endif
+
void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 3df8d3dd31cd..376ac957cd1c 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -58,7 +58,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
units = 1;
div = 1200; /* 833.33ns */
}
@@ -535,7 +535,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
- error_priv.dev = dev;
+ error_priv.i915 = dev_priv;
i915_error_state_get(dev, &error_priv);
ret = i915_error_state_to_str(&error_str, &error_priv);
@@ -560,7 +560,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
DRM_DEBUG_DRIVER("Resetting error state\n");
- i915_destroy_error_state(&dev_priv->drm);
+ i915_destroy_error_state(dev_priv);
return count;
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index c5d210ebaa9a..4461df5a94fe 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -406,7 +406,7 @@ TRACE_EVENT(i915_gem_evict,
),
TP_fast_assign(
- __entry->dev = vm->dev->primary->index;
+ __entry->dev = vm->i915->drm.primary->index;
__entry->vm = vm;
__entry->size = size;
__entry->align = align;
@@ -443,13 +443,41 @@ TRACE_EVENT(i915_gem_evict_vm,
),
TP_fast_assign(
- __entry->dev = vm->dev->primary->index;
+ __entry->dev = vm->i915->drm.primary->index;
__entry->vm = vm;
),
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
+TRACE_EVENT(i915_gem_evict_node,
+ TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
+ TP_ARGS(vm, node, flags),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(struct i915_address_space *, vm)
+ __field(u64, start)
+ __field(u64, size)
+ __field(unsigned long, color)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = vm->i915->drm.primary->index;
+ __entry->vm = vm;
+ __entry->start = node->start;
+ __entry->size = node->size;
+ __entry->color = node->color;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x",
+ __entry->dev, __entry->vm,
+ __entry->start, __entry->size,
+ __entry->color, __entry->flags)
+);
+
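The new tracepoint follows the standard TRACE_EVENT() shape: TP_STRUCT__entry declares the record layout, TP_fast_assign captures the values at the call site, and TP_printk formats them for the trace buffer. The emitting call site lives outside this hunk, presumably in the eviction path, along the lines of:

	trace_i915_gem_evict_node(vm, node, flags);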
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from),
@@ -711,7 +739,7 @@ DECLARE_EVENT_CLASS(i915_ppgtt,
TP_fast_assign(
__entry->vm = vm;
- __entry->dev = vm->dev->primary->index;
+ __entry->dev = vm->i915->drm.primary->index;
),
TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
new file mode 100644
index 000000000000..34020873e1f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_UTILS_H
+#define __I915_UTILS_H
+
+#define range_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_t(type, start, size, max) \
+ range_overflows((type)(start), (type)(size), (type)(max))
+
+/* Note that sign bits are not considered */
+#define overflows_type(x, T) \
+ (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+
+#define ptr_mask_bits(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v & PAGE_MASK); \
+})
+
+#define ptr_unpack_bits(ptr, bits) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (bits) = __v & ~PAGE_MASK; \
+ (typeof(ptr))(__v & PAGE_MASK); \
+})
+
+#define ptr_pack_bits(ptr, bits) \
+ ((typeof(ptr))((unsigned long)(ptr) | (bits)))
+
+#define fetch_and_zero(ptr) ({ \
+ typeof(*ptr) __T = *(ptr); \
+ *(ptr) = (typeof(*ptr))0; \
+ __T; \
+})
+
+#endif /* !__I915_UTILS_H */
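These helpers are plain GNU C and small enough to exercise in userspace. A minimal sketch, assuming a 4 KiB page and defining PAGE_MASK and BITS_PER_BYTE locally for the demo; the (void)(&a == &b) type checks from the original are dropped for brevity:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
#define PAGE_MASK (~((unsigned long)4096 - 1))

#define range_overflows(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	start__ > max__ || size__ > max__ - start__; \
})

#define fetch_and_zero(ptr) ({ \
	typeof(*ptr) __T = *(ptr); \
	*(ptr) = (typeof(*ptr))0; \
	__T; \
})

int main(void)
{
	/* Overflow-safe: start + size is never computed directly. */
	printf("%d\n", range_overflows(0xfffff000ull, 0x2000ull, 0xffffffffull)); /* 1 */
	printf("%d\n", range_overflows(0x1000ull, 0x1000ull, 0xffffffffull));     /* 0 */

	/* ptr_pack_bits/ptr_unpack_bits stash flags in the low page bits. */
	unsigned long v = (0x10000ul & PAGE_MASK) | 0x3;
	printf("ptr=%#lx bits=%lu\n", v & PAGE_MASK, v & ~PAGE_MASK);

	/* fetch_and_zero reads and clears in a single expression. */
	int x = 42;
	int old = fetch_and_zero(&x);
	printf("%d %d\n", old, x); /* 42 0 */
	return 0;
}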
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dae340cfc6c7..d0abfd08a01c 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -116,22 +116,20 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
memset(&bl_info, 0, sizeof(bl_info));
}
-static int vgt_balloon_space(struct drm_mm *mm,
+static int vgt_balloon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node,
unsigned long start, unsigned long end)
{
unsigned long size = end - start;
- if (start == end)
+ if (start >= end)
return -EINVAL;
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
-
- node->start = start;
- node->size = size;
-
- return drm_mm_reserve_node(mm, node);
+ return i915_gem_gtt_reserve(&ggtt->base, node,
+ size, start, I915_COLOR_UNEVICTABLE,
+ 0);
}
/**
@@ -214,10 +212,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
/* Unmappable graphic memory ballooning */
if (unmappable_base > ggtt->mappable_end) {
- ret = vgt_balloon_space(&ggtt->base.mm,
- &bl_info.space[2],
- ggtt->mappable_end,
- unmappable_base);
+ ret = vgt_balloon_space(ggtt, &bl_info.space[2],
+ ggtt->mappable_end, unmappable_base);
if (ret)
goto err;
@@ -228,18 +224,15 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
* because it is reserved for the guard page.
*/
if (unmappable_end < ggtt_end - PAGE_SIZE) {
- ret = vgt_balloon_space(&ggtt->base.mm,
- &bl_info.space[3],
- unmappable_end,
- ggtt_end - PAGE_SIZE);
+ ret = vgt_balloon_space(ggtt, &bl_info.space[3],
+ unmappable_end, ggtt_end - PAGE_SIZE);
if (ret)
goto err;
}
/* Mappable graphic memory ballooning */
if (mappable_base > ggtt->base.start) {
- ret = vgt_balloon_space(&ggtt->base.mm,
- &bl_info.space[0],
+ ret = vgt_balloon_space(ggtt, &bl_info.space[0],
ggtt->base.start, mappable_base);
if (ret)
@@ -247,10 +240,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
}
if (mappable_end < ggtt->mappable_end) {
- ret = vgt_balloon_space(&ggtt->base.mm,
- &bl_info.space[1],
- mappable_end,
- ggtt->mappable_end);
+ ret = vgt_balloon_space(ggtt, &bl_info.space[1],
+ mappable_end, ggtt->mappable_end);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e924a9516079..155906e84812 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -45,6 +45,7 @@ i915_vma_retire(struct i915_gem_active *active,
if (i915_vma_is_active(vma))
return;
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma));
@@ -69,17 +70,15 @@ i915_vma_retire(struct i915_gem_active *active,
}
static struct i915_vma *
-__i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
struct rb_node *rb, **p;
int i;
- GEM_BUG_ON(vm->closed);
-
- vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+ vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -87,24 +86,50 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
init_request_active(&vma->last_fence, NULL);
- list_add(&vma->vm_link, &vm->unbound_list);
vma->vm = vm;
vma->obj = obj;
vma->size = obj->base.size;
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- if (view) {
+ if (view && view->type != I915_GGTT_VIEW_NORMAL) {
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
- vma->size = view->params.partial.size;
+ GEM_BUG_ON(range_overflows_t(u64,
+ view->partial.offset,
+ view->partial.size,
+ obj->base.size >> PAGE_SHIFT));
+ vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
+ GEM_BUG_ON(vma->size >= obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
- vma->size =
- intel_rotation_info_size(&view->params.rotated);
+ vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
}
}
+ if (unlikely(vma->size > vm->total))
+ goto err_vma;
+
+ GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
+
if (i915_is_ggtt(vm)) {
+ if (unlikely(overflows_type(vma->size, u32)))
+ goto err_vma;
+
+ vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
+ if (unlikely(vma->fence_size < vma->size || /* overflow */
+ vma->fence_size > vm->total))
+ goto err_vma;
+
+ GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
+
+ vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
+ GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
+
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
@@ -126,20 +151,74 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma_tree);
+ list_add(&vma->vm_link, &vm->unbound_list);
return vma;
+
+err_vma:
+ kmem_cache_free(vm->i915->vmas, vma);
+ return ERR_PTR(-E2BIG);
}
+static struct i915_vma *
+vma_lookup(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ struct rb_node *rb;
+
+ rb = obj->vma_tree.rb_node;
+ while (rb) {
+ struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
+ long cmp;
+
+ cmp = i915_vma_compare(vma, vm, view);
+ if (cmp == 0)
+ return vma;
+
+ if (cmp < 0)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+
+ return NULL;
+}
+
+/**
+ * i915_vma_instance - return the singleton instance of the VMA
+ * @obj: parent &struct drm_i915_gem_object to be mapped
+ * @vm: address space in which the mapping is located
+ * @view: additional mapping requirements
+ *
+ * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
+ * the same @view characteristics. If a match is not found, one is created.
+ * Once created, the VMA is kept until either the object is freed, or the
+ * address space is closed.
+ *
+ * Must be called with struct_mutex held.
+ *
+ * Returns the vma, or an error pointer.
+ */
struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+i915_vma_instance(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
+ struct i915_vma *vma;
+
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+ GEM_BUG_ON(vm->closed);
+
+ vma = vma_lookup(obj, vm, view);
+ if (!vma)
+ vma = vma_create(obj, vm, view);
- return __i915_vma_create(obj, vm, view);
+ GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
+ GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
+ GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
+ return vma;
}
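i915_vma_instance() replaces i915_vma_create() with a lookup-or-create singleton: the rb-tree keyed by (vm, view) is searched first and a VMA is only created on a miss, so each (obj, vm, view) triple maps to exactly one VMA until the object or address space is torn down. The same shape in a self-contained sketch, where a linked list stands in for the rb-tree:

#include <stdlib.h>

struct vma { int vm_id; int view; struct vma *next; };

static struct vma *cache;

static struct vma *vma_lookup(int vm_id, int view)
{
	for (struct vma *v = cache; v; v = v->next)
		if (v->vm_id == vm_id && v->view == view)
			return v;
	return NULL;
}

/* Look up first, create only on a miss: callers always share one instance. */
static struct vma *vma_instance(int vm_id, int view)
{
	struct vma *v = vma_lookup(vm_id, view);
	if (!v) {
		v = calloc(1, sizeof(*v));
		if (!v)
			return NULL;
		v->vm_id = vm_id;
		v->view = view;
		v->next = cache;
		cache = v;
	}
	return v;
}

int main(void)
{
	return vma_instance(1, 0) == vma_instance(1, 0) ? 0 : 1;
}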
/**
@@ -176,6 +255,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (bind_flags == 0)
return 0;
+ if (GEM_WARN_ON(range_overflows(vma->node.start,
+ vma->node.size,
+ vma->vm->total)))
+ return -ENODEV;
+
if (vma_flags == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm,
@@ -199,9 +283,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr;
/* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+ assert_rpm_wakelock_held(vma->vm->i915);
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
return IO_ERR_PTR(-ENODEV);
@@ -249,7 +333,8 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (vma->node.size < size)
return true;
- if (alignment && vma->node.start & (alignment - 1))
+ GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+ if (alignment && !IS_ALIGNED(vma->node.start, alignment))
return true;
if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
@@ -268,40 +353,37 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj));
- fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj),
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
- mappable = (vma->node.start + fence_size <=
- dev_priv->ggtt.mappable_end);
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->fence_size);
/*
* Explicitly disable for rotated VMA since the display does not
* need the fence and the VMA is not accessible to other users.
*/
- if (mappable && fenceable &&
- vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+ return;
+
+ fenceable = (vma->node.size >= vma->fence_size &&
+ IS_ALIGNED(vma->node.start, vma->fence_alignment));
+
+ mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
+
+ if (mappable && fenceable)
vma->flags |= I915_VMA_CAN_FENCE;
else
vma->flags &= ~I915_VMA_CAN_FENCE;
}
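With fence_size and fence_alignment now precomputed at VMA creation, the map-and-fenceable test reduces to arithmetic on the bound node. A sketch of the two predicates, with example aperture values chosen for the demo:

#include <stdbool.h>
#include <stdint.h>

#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

struct node { uint64_t start, size; };

static bool fenceable(const struct node *n, uint64_t fence_size,
		      uint64_t fence_alignment)
{
	return n->size >= fence_size && IS_ALIGNED(n->start, fence_alignment);
}

static bool mappable(const struct node *n, uint64_t fence_size,
		     uint64_t mappable_end)
{
	/* The whole fenced footprint must sit inside the mappable aperture. */
	return n->start + fence_size <= mappable_end;
}

int main(void)
{
	struct node n = { .start = 4096, .size = 8192 };
	return fenceable(&n, 8192, 4096) && mappable(&n, 8192, 256 << 20) ? 0 : 1;
}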
-bool i915_gem_valid_gtt_space(struct i915_vma *vma,
- unsigned long cache_level)
+static bool color_differs(struct drm_mm_node *node, unsigned long color)
+{
+ return node->allocated && node->color != color;
+}
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
- struct drm_mm_node *gtt_space = &vma->node;
+ struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
/*
@@ -314,18 +396,16 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma,
if (vma->vm->mm.color_adjust == NULL)
return true;
- if (!drm_mm_node_allocated(gtt_space))
- return true;
-
- if (list_empty(&gtt_space->node_list))
- return true;
+ /* Only valid to be called on an already inserted vma */
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
+ GEM_BUG_ON(list_empty(&node->node_list));
- other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
- if (other->allocated && !other->hole_follows && other->color != cache_level)
+ other = list_prev_entry(node, node_list);
+ if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
return false;
- other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
- if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ other = list_next_entry(node, node_list);
+ if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
return false;
return true;
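The rewrite folds the old open-coded neighbour checks into a color_differs() predicate plus drm_mm_hole_follows(): a placement is invalid only when an adjacent allocated node has a different cache colour and no guard hole separates the two. A list-free sketch of the rule, assuming the neighbours are passed in directly:

#include <stdbool.h>

struct node {
	bool allocated;
	bool hole_follows;   /* gap between this node and the next one */
	unsigned long color;
};

static bool color_differs(const struct node *n, unsigned long color)
{
	return n->allocated && n->color != color;
}

/* prev/next are the list neighbours of the node being validated. */
static bool valid_placement(const struct node *prev, const struct node *node,
			    const struct node *next, unsigned long color)
{
	if (color_differs(prev, color) && !prev->hole_follows)
		return false;
	if (color_differs(next, color) && !node->hole_follows)
		return false;
	return true;
}

int main(void)
{
	struct node prev = { 1, true, 0 }, node = { 1, false, 1 }, next = { 1, false, 0 };
	/* next differs in colour with no hole after node: correctly rejected */
	return valid_placement(&prev, &node, &next, 1) ? 1 : 0;
}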
@@ -348,7 +428,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma,
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_private *dev_priv = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
u64 start, end;
int ret;
@@ -357,22 +437,26 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
- if (flags & PIN_MAPPABLE)
- size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
+ alignment = max(alignment, vma->display_alignment);
+ if (flags & PIN_MAPPABLE) {
+ size = max_t(typeof(size), size, vma->fence_size);
+ alignment = max_t(typeof(alignment),
+ alignment, vma->fence_alignment);
+ }
- alignment = max(max(alignment, vma->display_alignment),
- i915_gem_get_ggtt_alignment(dev_priv, size,
- i915_gem_object_get_tiling(obj),
- flags & PIN_MAPPABLE));
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+ GEM_BUG_ON(!is_power_of_2(alignment));
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+ GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G)
- end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+ end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
+ GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
/* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
@@ -392,64 +476,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) || offset > end - size) {
+ if (!IS_ALIGNED(offset, alignment) ||
+ range_overflows(offset, size, end)) {
ret = -EINVAL;
goto err_unpin;
}
- vma->node.start = offset;
- vma->node.size = size;
- vma->node.color = obj->cache_level;
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret) {
- ret = i915_gem_evict_for_vma(vma);
- if (ret == 0)
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret)
- goto err_unpin;
- }
+ ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+ size, offset, obj->cache_level,
+ flags);
+ if (ret)
+ goto err_unpin;
} else {
- u32 search_flag, alloc_flag;
-
- if (flags & PIN_HIGH) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
- } else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
- }
-
- /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
- * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results
- * with zero alignment, so where possible use the optimal
- * path.
- */
- if (alignment <= 4096)
- alignment = 0;
-
-search_free:
- ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
- &vma->node,
- size, alignment,
- obj->cache_level,
- start, end,
- search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(vma->vm, size, alignment,
- obj->cache_level,
- start, end,
- flags);
- if (ret == 0)
- goto search_free;
-
+ ret = i915_gem_gtt_insert(vma->vm, &vma->node,
+ size, alignment, obj->cache_level,
+ start, end, flags);
+ if (ret)
goto err_unpin;
- }
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
@@ -470,7 +518,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
unsigned int bound = vma->flags;
int ret;
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
@@ -492,6 +540,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
@@ -568,7 +617,7 @@ int i915_vma_unbind(struct i915_vma *vma)
for_each_active(active, idx) {
ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->dev->struct_mutex);
+ &vma->vm->i915->drm.struct_mutex);
if (ret)
break;
}
@@ -629,6 +678,7 @@ int i915_vma_unbind(struct i915_vma *vma)
* reaped by the shrinker.
*/
i915_gem_object_unpin_pages(obj);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
destroy:
if (unlikely(i915_vma_is_closed(vma)))
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 85446f0b0b3f..e39d922cfb6f 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,6 +55,9 @@ struct i915_vma {
u64 size;
u64 display_alignment;
+ u32 fence_size;
+ u32 fence_alignment;
+
unsigned int flags;
/**
* How many users have pinned this object in GTT space. The following
@@ -109,9 +112,9 @@ struct i915_vma {
};
struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+i915_vma_instance(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
void i915_vma_unpin_and_release(struct i915_vma **p_vma);
@@ -178,25 +181,48 @@ static inline void i915_vma_put(struct i915_vma *vma)
i915_gem_object_put(vma->obj);
}
+static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
+{
+ return a - b;
+}
+
static inline long
i915_vma_compare(struct i915_vma *vma,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
+ ptrdiff_t cmp;
+
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- if (vma->vm != vm)
- return vma->vm - vm;
+ cmp = ptrdiff(vma->vm, vm);
+ if (cmp)
+ return cmp;
+ BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
+ cmp = vma->ggtt_view.type;
if (!view)
- return vma->ggtt_view.type;
+ return cmp;
- if (vma->ggtt_view.type != view->type)
- return vma->ggtt_view.type - view->type;
+ cmp -= view->type;
+ if (cmp)
+ return cmp;
- return memcmp(&vma->ggtt_view.params,
- &view->params,
- sizeof(view->params));
+ /* ggtt_view.type also encodes its size so that we both distinguish
+ * different views using it as a "type" and also use a compact (no
+ * accessing of uninitialised padding bytes) memcmp without storing
+ * an extra parameter or adding more code.
+ *
+ * To ensure that the memcmp is valid for all branches of the union,
+ * even though the code looks like it is just comparing one branch,
+ * the BUILD_BUG_ONs below assert that all branches start at the same
+ * address and that each branch has a unique type/size.
+ */
+ BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
+ BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
+ BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
+ offsetof(typeof(*view), partial));
+ return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
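The trick relies on the view type enum doubling as the byte size of the corresponding union branch, so a single memcmp of view->type bytes compares exactly the initialised payload and never touches padding. A self-contained illustration with made-up payload sizes:

#include <stdio.h>
#include <string.h>

struct partial { unsigned int offset, size; };   /* 8 bytes */
struct rotated { unsigned int plane[4]; };       /* 16 bytes */

enum view_type {                                 /* type value == payload size */
	VIEW_NORMAL  = 0,
	VIEW_PARTIAL = sizeof(struct partial),
	VIEW_ROTATED = sizeof(struct rotated),
};

struct view {
	enum view_type type;
	union {
		struct partial partial;
		struct rotated rotated;
	};
};

static int view_compare(const struct view *a, const struct view *b)
{
	if (a->type != b->type)
		return (int)a->type - (int)b->type;
	/* Compare only the bytes the active branch actually initialises. */
	return memcmp(&a->partial, &b->partial, a->type);
}

int main(void)
{
	struct view a = { .type = VIEW_PARTIAL, .partial = { 0, 4 } };
	struct view b = { .type = VIEW_PARTIAL, .partial = { 0, 8 } };
	printf("%d\n", view_compare(&a, &b) != 0); /* 1 */
	return 0;
}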
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
@@ -221,8 +247,11 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
/* Pin early to prevent the shrinker/eviction logic from destroying
* our vma as we insert and bind.
*/
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
+ }
return __i915_vma_do_pin(vma, size, alignment, flags);
}
@@ -282,7 +311,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*/
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_unpin(vma);
}
@@ -311,7 +340,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
static inline bool
i915_vma_pin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
if (vma->fence) {
vma->fence->pin_count++;
return true;
@@ -330,7 +359,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
if (vma->fence) {
GEM_BUG_ON(vma->fence->pin_count <= 0);
vma->fence->pin_count--;
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index c5a166752eda..aa9160e7f1d8 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -265,37 +265,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
return 0;
}
-static void
-intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll_config *shared_dpll)
-{
- enum intel_dpll_id i;
-
- /* Copy shared dpll state */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- shared_dpll[i] = pll->config;
- }
-}
-
-struct intel_shared_dpll_config *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
-{
- struct intel_atomic_state *state = to_intel_atomic_state(s);
-
- WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
-
- if (!state->dpll_set) {
- state->dpll_set = true;
-
- intel_atomic_duplicate_dpll_state(to_i915(s->dev),
- state->shared_dpll);
- }
-
- return state->shared_dpll;
-}
-
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 8d3e515f27ba..41fd94e62d3c 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -123,36 +123,24 @@ intel_plane_destroy_state(struct drm_plane *plane,
drm_atomic_helper_plane_destroy_state(plane, state);
}
-static int intel_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *intel_state)
{
+ struct drm_plane *plane = intel_state->base.plane;
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_crtc *crtc = state->crtc;
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state;
+ struct drm_plane_state *state = &intel_state->base;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_plane_state *intel_state = to_intel_plane_state(state);
- struct drm_crtc_state *drm_crtc_state;
int ret;
- crtc = crtc ? crtc : plane->state->crtc;
- intel_crtc = to_intel_crtc(crtc);
-
/*
* Both crtc and plane->crtc could be NULL if we're updating a
* property while the plane is disabled. We don't actually have
* anything driver-specific we need to test in that case, so
* just return success.
*/
- if (!crtc)
+ if (!intel_state->base.crtc && !plane->state->crtc)
return 0;
- drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
- if (WARN_ON(!drm_crtc_state))
- return -EINVAL;
-
- crtc_state = to_intel_crtc_state(drm_crtc_state);
-
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
@@ -175,11 +163,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
* RGB 16-bit 5:6:5, and Indexed 8-bit.
* TBD: Add RGB64 case once its added in supported format list.
*/
- switch (state->fb->pixel_format) {
+ switch (state->fb->format->format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(state->fb->pixel_format,
+ drm_get_format_name(state->fb->format->format,
&format_name));
return -EINVAL;
@@ -204,6 +192,31 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
return intel_plane_atomic_calc_changes(&crtc_state->base, state);
}
+static int intel_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *drm_crtc_state;
+
+ crtc = crtc ? crtc : plane->state->crtc;
+
+ /*
+ * Both crtc and plane->crtc could be NULL if we're updating a
+ * property while the plane is disabled. We don't actually have
+ * anything driver-specific we need to test in that case, so
+ * just return success.
+ */
+ if (!crtc)
+ return 0;
+
+ drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!drm_crtc_state))
+ return -EINVAL;
+
+ return intel_plane_atomic_check_with_state(to_intel_crtc_state(drm_crtc_state),
+ to_intel_plane_state(state));
+}
+
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 892169b7952b..d76f3033e890 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -755,25 +755,49 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
return dev_priv->cdclk_freq;
}
+/*
+ * Get the intel_encoder for the given port and pipe; encoders are
+ * saved indexed by pipe.
+ * MST & (pipe >= 0): return av_enc_map[pipe] when the port matches
+ * MST & (pipe < 0): invalid
+ * Non-MST & (pipe >= 0): only pipe == 0 (the first device entry)
+ * yields the intel_encoder with the matching port
+ * Non-MST & (pipe < 0): return the intel_encoder with the matching port
+ */
static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
int port, int pipe)
{
+ struct intel_encoder *encoder;
if (WARN_ON(pipe >= I915_MAX_PIPES))
return NULL;
/* MST */
- if (pipe >= 0)
- return dev_priv->av_enc_map[pipe];
+ if (pipe >= 0) {
+ encoder = dev_priv->av_enc_map[pipe];
+ /*
+ * At boot the audio driver may not yet know whether the
+ * link is MST, so it polls all port & pipe combinations.
+ */
+ if (encoder != NULL && encoder->port == port &&
+ encoder->type == INTEL_OUTPUT_DP_MST)
+ return encoder;
+ }
/* Non-MST */
- for_each_pipe(dev_priv, pipe) {
- struct intel_encoder *encoder;
+ if (pipe > 0)
+ return NULL;
+ for_each_pipe(dev_priv, pipe) {
encoder = dev_priv->av_enc_map[pipe];
if (encoder == NULL)
continue;
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
if (port == encoder->port)
return encoder;
}
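The lookup now filters by encoder type as well as port: a pipe-indexed hit is returned only for DP MST, and the non-MST scan skips MST encoders, so the two paths can no longer hand back each other's encoders. A sketch of the dispatch, using a hypothetical fixed-size map:

enum out_type { OUT_HDMI, OUT_DP, OUT_DP_MST };
struct encoder { int port; enum out_type type; };

#define MAX_PIPES 3
static struct encoder *av_enc_map[MAX_PIPES];

static struct encoder *get_saved_enc(int port, int pipe)
{
	struct encoder *enc;

	/* MST: the audio driver addressed a specific pipe. */
	if (pipe >= 0) {
		enc = av_enc_map[pipe];
		if (enc && enc->port == port && enc->type == OUT_DP_MST)
			return enc;
	}

	/* Non-MST: only pipe < 0 or pipe == 0 queries fall through. */
	if (pipe > 0)
		return (struct encoder *)0;

	for (pipe = 0; pipe < MAX_PIPES; pipe++) {
		enc = av_enc_map[pipe];
		if (enc && enc->type != OUT_DP_MST && enc->port == port)
			return enc;
	}
	return (struct encoder *)0;
}

int main(void)
{
	struct encoder hdmi = { .port = 1, .type = OUT_HDMI };
	av_enc_map[0] = &hdmi;
	return get_saved_enc(1, -1) == &hdmi ? 0 : 1;
}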
@@ -799,9 +823,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* 1. get the pipe */
intel_encoder = get_saved_enc(dev_priv, port, pipe);
- if (!intel_encoder || !intel_encoder->base.crtc ||
- (intel_encoder->type != INTEL_OUTPUT_HDMI &&
- intel_encoder->type != INTEL_OUTPUT_DP)) {
+ if (!intel_encoder || !intel_encoder->base.crtc) {
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
@@ -924,6 +946,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ return;
+
ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 7ffab1abc518..e144f033f4b5 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -114,16 +114,18 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
- dvo_timing->hsync_pulse_width;
+ ((dvo_timing->hsync_pulse_width_hi << 8) |
+ dvo_timing->hsync_pulse_width_lo);
panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
dvo_timing->vactive_lo;
panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
- dvo_timing->vsync_off;
+ ((dvo_timing->vsync_off_hi << 4) | dvo_timing->vsync_off_lo);
panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
- dvo_timing->vsync_pulse_width;
+ ((dvo_timing->vsync_pulse_width_hi << 4) |
+ dvo_timing->vsync_pulse_width_lo);
panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
panel_fixed_mode->clock = dvo_timing->clock * 10;
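The fix assembles the full pulse widths from their split VBT fields instead of using the truncated low field alone: horizontal values carry 8 extra high bits, vertical values 4. A worked example with hypothetical raw field values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical raw VBT timing fields. */
	unsigned int hsync_pulse_width_hi = 0x1, hsync_pulse_width_lo = 0x2c;
	unsigned int vsync_pulse_width_hi = 0x0, vsync_pulse_width_lo = 0x5;

	/* Horizontal: hi byte << 8 | lo byte -> up to 16 bits. */
	unsigned int hsync = (hsync_pulse_width_hi << 8) | hsync_pulse_width_lo;
	/* Vertical: hi nibble << 4 | lo nibble -> up to 8 bits. */
	unsigned int vsync = (vsync_pulse_width_hi << 4) | vsync_pulse_width_lo;

	printf("hsync=%u vsync=%u\n", hsync, vsync); /* hsync=300 vsync=5 */
	return 0;
}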
@@ -330,17 +332,19 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
method = &backlight_data->backlight_control[panel_type];
dev_priv->vbt.backlight.type = method->type;
+ dev_priv->vbt.backlight.controller = method->controller;
}
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
- "active %s, min brightness %u, level %u\n",
+ "active %s, min brightness %u, level %u, controller %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
dev_priv->vbt.backlight.min_brightness,
- backlight_data->level[panel_type]);
+ backlight_data->level[panel_type],
+ dev_priv->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
@@ -1159,6 +1163,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
+ info->supports_edp = is_edp;
DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
@@ -1411,13 +1416,16 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return false;
}
- if (vbt->bdb_offset + sizeof(struct bdb_header) > size) {
+ if (range_overflows_t(size_t,
+ vbt->bdb_offset,
+ sizeof(struct bdb_header),
+ size)) {
DRM_DEBUG_DRIVER("BDB header incomplete\n");
return false;
}
bdb = get_bdb_header(vbt);
- if (vbt->bdb_offset + bdb->bdb_size > size) {
+ if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
DRM_DEBUG_DRIVER("BDB incomplete\n");
return false;
}
@@ -1662,6 +1670,9 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
};
int i;
+ if (HAS_DDI(dev_priv))
+ return dev_priv->vbt.ddi_port_info[port].supports_edp;
+
if (!dev_priv->vbt.child_dev_num)
return false;
@@ -1779,7 +1790,7 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
{
int i;
- if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
+ if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv)))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index c9c46a538edb..fcfa423d08bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -154,7 +154,7 @@ static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
static inline struct intel_wait *to_wait(struct rb_node *node)
{
- return container_of(node, struct intel_wait, node);
+ return rb_entry(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
@@ -427,7 +427,7 @@ static bool signal_complete(struct drm_i915_gem_request *request)
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
- return container_of(rb, struct drm_i915_gem_request, signaling.node);
+ return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}
static void signaler_set_rtpriority(void)
@@ -623,6 +623,12 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ /* The engines should be idle and all requests accounted for! */
+ WARN_ON(READ_ONCE(b->first_wait));
+ WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
+ WARN_ON(READ_ONCE(b->first_signal));
+ WARN_ON(!RB_EMPTY_ROOT(&b->signals));
+
if (!IS_ERR_OR_NULL(b->signaler))
kthread_stop(b->signaler);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 588470eb8d39..2bf5aca6e37c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -837,12 +837,11 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-void intel_crt_init(struct drm_device *dev)
+void intel_crt_init(struct drm_i915_private *dev_priv)
{
struct drm_connector *connector;
struct intel_crt *crt;
struct intel_connector *intel_connector;
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t adpa_reg;
u32 adpa;
@@ -882,10 +881,10 @@ void intel_crt_init(struct drm_device *dev)
connector = &intel_connector->base;
crt->connector = intel_connector;
- drm_connector_init(dev, &intel_connector->base,
+ drm_connector_init(&dev_priv->drm, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
+ drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC, "CRT");
intel_connector_attach_encoder(intel_connector, &crt->base);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d7a04bca8c28..0085bc745f6a 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,6 +34,10 @@
* low-power state and comes back to normal.
*/
+#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
+MODULE_FIRMWARE(I915_CSR_GLK);
+#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
@@ -286,7 +290,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if (IS_KABYLAKE(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv)) {
+ required_version = GLK_CSR_VERSION_REQUIRED;
+ } else if (IS_KABYLAKE(dev_priv)) {
required_version = KBL_CSR_VERSION_REQUIRED;
} else if (IS_SKYLAKE(dev_priv)) {
required_version = SKL_CSR_VERSION_REQUIRED;
@@ -389,7 +395,7 @@ static void csr_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
struct intel_csr *csr;
- const struct firmware *fw;
+ const struct firmware *fw = NULL;
int ret;
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
@@ -405,7 +411,7 @@ static void csr_load_work_fn(struct work_struct *work)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- DRM_INFO("Finished loading %s (v%u.%u)\n",
+ DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
dev_priv->csr.fw_path,
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
@@ -435,7 +441,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_KABYLAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
+ csr->fw_path = I915_CSR_GLK;
+ else if (IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_KBL;
else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 10ec9d4b7d45..66b367d0771a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -442,7 +442,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return hdmi_level;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -484,7 +484,7 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return;
if (IS_KABYLAKE(dev_priv)) {
@@ -567,7 +567,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
enum port port = intel_ddi_get_encoder_port(encoder);
const struct ddi_buf_trans *ddi_translations_hdmi;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return;
hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
@@ -1057,7 +1057,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
return 0;
pll = &dev_priv->shared_dplls[dpll];
- state = &pll->config.hw_state;
+ state = &pll->state.hw_state;
clock.m1 = 2;
clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
@@ -1091,7 +1091,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
}
@@ -1153,7 +1153,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else
@@ -1429,7 +1429,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
out:
- if (ret && IS_BROXTON(dev_priv)) {
+ if (ret && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
@@ -1643,7 +1643,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_set_iboost(encoder, level);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
return DDI_BUF_TRANS_SELECT(level);
@@ -1701,7 +1701,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
bool has_hdmi_sink,
- struct drm_display_mode *adjusted_mode,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
struct intel_shared_dpll *pll)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
@@ -1715,13 +1716,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_prepare_hdmi_ddi_buffers(encoder);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_set_iboost(encoder, level);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port,
INTEL_OUTPUT_HDMI);
intel_hdmi->set_infoframes(drm_encoder,
has_hdmi_sink,
- adjusted_mode);
+ crtc_state, conn_state);
}
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
@@ -1742,8 +1743,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
}
if (type == INTEL_OUTPUT_HDMI) {
intel_ddi_pre_enable_hdmi(intel_encoder,
- crtc->config->has_hdmi_sink,
- &crtc->config->base.adjusted_mode,
+ pipe_config->has_hdmi_sink,
+ pipe_config, conn_state,
crtc->config->shared_dpll);
}
}
@@ -1949,6 +1950,19 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
+bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc)
+{
+ u32 temp;
+
+ if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+ return true;
+ }
+ return false;
+}
+
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2014,11 +2028,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
- temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
- pipe_config->has_audio = true;
- }
+ pipe_config->has_audio =
+ intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
@@ -2042,7 +2053,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
}
@@ -2066,7 +2077,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
- if (IS_BROXTON(dev_priv) && ret)
+ if (IS_GEN9_LP(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
pipe_config->lane_count);
@@ -2123,10 +2134,10 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_shared_dpll *pll = NULL;
- struct intel_shared_dpll_config tmp_pll_config;
+ struct intel_shared_dpll_state tmp_pll_state;
enum intel_dpll_id dpll_id;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
dpll_id = (enum intel_dpll_id)dig_port->port;
/*
* Select the required PLL. This works for platforms where
@@ -2139,11 +2150,11 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
pll->active_mask);
return NULL;
}
- tmp_pll_config = pll->config;
+ tmp_pll_state = pll->state;
if (!bxt_ddi_dp_set_dpll_hw_state(clock,
- &pll->config.hw_state)) {
+ &pll->state.hw_state)) {
DRM_ERROR("Could not setup DPLL\n");
- pll->config = tmp_pll_config;
+ pll->state = tmp_pll_state;
return NULL;
}
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -2154,9 +2165,8 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
return pll;
}
-void intel_ddi_init(struct drm_device *dev, enum port port)
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -2218,12 +2228,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
- drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
@@ -2244,7 +2254,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* configuration so that we use the proper lane count for our
* calculations.
*/
- if (IS_BROXTON(dev_priv) && port == PORT_A) {
+ if (IS_GEN9_LP(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 185e3bbc9ec9..fcf81815daff 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -24,11 +24,51 @@
#include "i915_drv.h"
+#define PLATFORM_NAME(x) [INTEL_##x] = #x
+static const char * const platform_names[] = {
+ PLATFORM_NAME(I830),
+ PLATFORM_NAME(I845G),
+ PLATFORM_NAME(I85X),
+ PLATFORM_NAME(I865G),
+ PLATFORM_NAME(I915G),
+ PLATFORM_NAME(I915GM),
+ PLATFORM_NAME(I945G),
+ PLATFORM_NAME(I945GM),
+ PLATFORM_NAME(G33),
+ PLATFORM_NAME(PINEVIEW),
+ PLATFORM_NAME(I965G),
+ PLATFORM_NAME(I965GM),
+ PLATFORM_NAME(G45),
+ PLATFORM_NAME(GM45),
+ PLATFORM_NAME(IRONLAKE),
+ PLATFORM_NAME(SANDYBRIDGE),
+ PLATFORM_NAME(IVYBRIDGE),
+ PLATFORM_NAME(VALLEYVIEW),
+ PLATFORM_NAME(HASWELL),
+ PLATFORM_NAME(BROADWELL),
+ PLATFORM_NAME(CHERRYVIEW),
+ PLATFORM_NAME(SKYLAKE),
+ PLATFORM_NAME(BROXTON),
+ PLATFORM_NAME(KABYLAKE),
+ PLATFORM_NAME(GEMINILAKE),
+};
+#undef PLATFORM_NAME
+
+const char *intel_platform_name(enum intel_platform platform)
+{
+ if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
+ platform_names[platform] == NULL))
+ return "<unknown>";
+
+ return platform_names[platform];
+}
+
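The table uses designated initializers keyed by the platform enum, so any gaps default to NULL and the accessor can reject both out-of-range values and unnamed slots. The same pattern in miniature:

#include <stdio.h>

enum platform { PLAT_FOO, PLAT_BAR, PLAT_MAX };

#define PLATFORM_NAME(x) [PLAT_##x] = #x
static const char * const names[] = {
	PLATFORM_NAME(FOO),
	PLATFORM_NAME(BAR),
};
#undef PLATFORM_NAME

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *platform_name(enum platform p)
{
	/* Unknown enum values and NULL holes both fall back safely. */
	if (p >= ARRAY_SIZE(names) || !names[p])
		return "<unknown>";
	return names[p];
}

int main(void)
{
	printf("%s %s\n", platform_name(PLAT_BAR), platform_name(PLAT_MAX));
	return 0;
}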
void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
+ DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
+ intel_platform_name(info->platform),
info->gen,
dev_priv->drm.pdev->device,
dev_priv->drm.pdev->revision);
@@ -152,7 +192,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
hweight8(sseu->slice_mask) > 1;
sseu->has_subslice_pg =
- IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
+ IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_BROXTON(dev_priv)) {
@@ -270,6 +310,12 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
struct intel_device_info *info = mkwrite_device_info(dev_priv);
enum pipe pipe;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ info->num_scalers[PIPE_A] = 2;
+ info->num_scalers[PIPE_B] = 2;
+ info->num_scalers[PIPE_C] = 1;
+ }
+
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
@@ -278,7 +324,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
* we don't expose the topmost plane at all to prevent ABI breakage
* down the line.
*/
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 3;
+ else if (IS_BROXTON(dev_priv)) {
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 891c86aef99d..01341670738f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -115,15 +115,15 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void skl_init_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
+static void intel_crtc_init_scalers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int glk_calc_cdclk(int max_pixclk);
static int bxt_calc_cdclk(int max_pixclk);
struct intel_limit {
@@ -614,12 +614,12 @@ static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
INTELPllInvalid("m1 out of range\n");
if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
+ !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv)) {
+ !IS_GEN9_LP(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -1232,7 +1232,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
{
bool cur_state;
- if (IS_845G(dev_priv) || IS_I865G(dev_priv))
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -1327,7 +1327,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_sprite(dev_priv, pipe, sprite) {
- u32 val = I915_READ(SPCNTR(pipe, sprite));
+ u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
@@ -2137,11 +2137,10 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
unsigned int rotation)
{
+ view->type = I915_GGTT_VIEW_NORMAL;
if (drm_rotation_90_or_270(rotation)) {
- *view = i915_ggtt_view_rotated;
- view->params.rotated = to_intel_framebuffer(fb)->rot_info;
- } else {
- *view = i915_ggtt_view_normal;
+ view->type = I915_GGTT_VIEW_ROTATED;
+ view->rotated = to_intel_framebuffer(fb)->rot_info;
}
}
@@ -2149,7 +2148,7 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
- else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+ else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
else if (INTEL_INFO(dev_priv)->gen >= 4)
@@ -2243,10 +2242,7 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
-
- if (WARN_ON_ONCE(!vma))
- return;
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
@@ -2273,7 +2269,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
int plane)
{
const struct drm_framebuffer *fb = state->base.fb;
- unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int cpp = fb->format->cpp[plane];
unsigned int pitch = fb->pitches[plane];
return y * pitch + x * cpp;
@@ -2342,7 +2338,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
{
const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
const struct drm_framebuffer *fb = state->base.fb;
- unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int cpp = fb->format->cpp[plane];
unsigned int rotation = state->base.rotation;
unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
@@ -2398,7 +2394,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
u32 alignment)
{
uint64_t fb_modifier = fb->modifier;
- unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int cpp = fb->format->cpp[plane];
u32 offset, offset_aligned;
if (alignment)
@@ -2453,7 +2449,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
u32 alignment;
/* AUX_DIST needs only 4K alignment */
- if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
+ if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
alignment = 4096;
else
alignment = intel_surf_alignment(dev_priv, fb->modifier);
@@ -2466,7 +2462,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
static void intel_fb_offset_to_xy(int *x, int *y,
const struct drm_framebuffer *fb, int plane)
{
- unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int cpp = fb->format->cpp[plane];
unsigned int pitch = fb->pitches[plane];
u32 linear_offset = fb->offsets[plane];
@@ -2494,8 +2490,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct intel_rotation_info *rot_info = &intel_fb->rot_info;
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
- uint32_t format = fb->pixel_format;
- int i, num_planes = drm_format_num_planes(format);
+ int i, num_planes = fb->format->num_planes;
unsigned int tile_size = intel_tile_size(dev_priv);
for (i = 0; i < num_planes; i++) {
@@ -2504,9 +2499,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
u32 offset;
int x, y;
- cpp = drm_format_plane_cpp(format, i);
- width = drm_format_plane_width(fb->width, format, i);
- height = drm_format_plane_height(fb->height, format, i);
+ cpp = fb->format->cpp[i];
+ width = drm_framebuffer_plane_width(fb->width, fb, i);
+ height = drm_framebuffer_plane_height(fb->height, fb, i);
intel_fb_offset_to_xy(&x, &y, fb, i);
@@ -2688,7 +2683,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mutex_lock(&dev->struct_mutex);
- obj = i915_gem_object_create_stolen_for_preallocated(dev,
+ obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
base_aligned,
base_aligned,
size_aligned);
@@ -2700,7 +2695,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (plane_config->tiling == I915_TILING_X)
obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
- mode_cmd.pixel_format = fb->pixel_format;
+ mode_cmd.pixel_format = fb->format->format;
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
mode_cmd.pitches[0] = fb->pitches[0];
@@ -2795,7 +2790,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
- to_intel_plane_state(plane_state)->base.visible = false;
+ plane_state->visible = false;
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
intel_pre_disable_primary_noatomic(&intel_crtc->base);
intel_plane->disable_plane(primary, &intel_crtc->base);
@@ -2844,7 +2839,7 @@ valid_fb:
static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
unsigned int rotation)
{
- int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ int cpp = fb->format->cpp[plane];
switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
@@ -2923,7 +2918,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* TODO: linear and Y-tiled seem fine, Yf untested,
*/
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = fb->format->cpp[0];
while ((x + w) * cpp > fb->pitches[0]) {
if (offset == 0) {
@@ -2991,7 +2986,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (fb->pixel_format == DRM_FORMAT_NV12) {
+ if (fb->format->format == DRM_FORMAT_NV12) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3046,7 +3041,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
@@ -3161,7 +3156,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
@@ -3275,12 +3270,12 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
* linear buffers or in number of tiles for tiled buffers.
*/
if (drm_rotation_90_or_270(rotation)) {
- int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ int cpp = fb->format->cpp[plane];
stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
} else {
stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
- fb->pixel_format);
+ fb->format->format);
}
return stride;
@@ -3375,7 +3370,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- int pipe = intel_crtc->pipe;
+ enum plane_id plane_id = to_intel_plane(plane)->id;
+ enum pipe pipe = to_intel_plane(plane)->pipe;
u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
@@ -3394,7 +3390,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
- plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
plane_ctl |= skl_plane_ctl_rotation(rotation);
@@ -3410,30 +3406,30 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = src_x;
intel_crtc->adjusted_y = src_y;
- I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
- I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
- I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
- I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w);
+ I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
+ I915_WRITE(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
+ I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
WARN_ON(!dst_w || !dst_h);
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
+ ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
crtc_state->scaler_state.scalers[scaler_id].mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
- I915_WRITE(PLANE_POS(pipe, 0), 0);
+ I915_WRITE(PLANE_POS(pipe, plane_id), 0);
} else {
- I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
+ I915_WRITE(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
}
- I915_WRITE(PLANE_SURF(pipe, 0),
+ I915_WRITE(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- POSTING_READ(PLANE_SURF(pipe, 0));
+ POSTING_READ(PLANE_SURF(pipe, plane_id));
}
static void skylake_disable_primary_plane(struct drm_plane *primary,
@@ -3441,12 +3437,12 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum plane_id plane_id = to_intel_plane(primary)->id;
+ enum pipe pipe = to_intel_plane(primary)->pipe;
- I915_WRITE(PLANE_CTL(pipe, 0), 0);
- I915_WRITE(PLANE_SURF(pipe, 0), 0);
- POSTING_READ(PLANE_SURF(pipe, 0));
+ I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
+ I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
+ POSTING_READ(PLANE_SURF(pipe, plane_id));
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -3555,23 +3551,19 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
state = drm_atomic_helper_duplicate_state(dev, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
- state = NULL;
DRM_ERROR("Duplicating state failed with %i\n", ret);
- goto err;
+ return;
}
ret = drm_atomic_helper_disable_all(dev, ctx);
if (ret) {
DRM_ERROR("Suspending crtc's failed with %i\n", ret);
- goto err;
+ drm_atomic_state_put(state);
+ return;
}
dev_priv->modeset_restore_state = state;
state->acquire_ctx = ctx;
- return;
-
-err:
- drm_atomic_state_put(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -4224,9 +4216,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
-bool intel_has_pending_fb_unpin(struct drm_device *dev)
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
/* Note that we don't need to be called with mode_config.lock here
@@ -4236,7 +4227,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
* cannot claim and pin a new fb without at least acquiring the
* struct_mutex and so serialising with us.
*/
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
@@ -4766,7 +4757,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
}
/* Check src format */
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
@@ -4782,7 +4773,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
intel_plane->base.base.id, intel_plane->base.name,
- fb->base.id, fb->pixel_format);
+ fb->base.id, fb->format->format);
return -EINVAL;
}
@@ -5017,11 +5008,9 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev_priv)) {
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->wm.vlv.cxsr = false;
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ intel_set_memory_cxsr(dev_priv, false))
intel_wait_for_vblank(dev_priv, pipe);
- }
}
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
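The hunk above and the two that follow lean on the same inferred interface change: intel_set_memory_cxsr() and ilk_disable_lp_wm() now return a bool that is true only when they actually had to touch the hardware, so the costly wait-for-vblank is skipped when CxSR or the LP watermarks were already off. The assumed contract, sketched from the call sites rather than a header:

/* Returns true only if the requested CxSR change really flipped
 * hardware state; inferred from the callers in this patch.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);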
@@ -5096,11 +5085,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (old_crtc_state->base.active) {
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->wm.vlv.cxsr = false;
+ if (old_crtc_state->base.active &&
+ intel_set_memory_cxsr(dev_priv, false))
intel_wait_for_vblank(dev_priv, crtc->pipe);
- }
}
/*
@@ -5110,10 +5097,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (pipe_config->disable_lp_wm) {
- ilk_disable_lp_wm(dev);
+ if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
intel_wait_for_vblank(dev_priv, crtc->pipe);
- }
/*
* If we're doing a modeset, we're done. No need to do any pre-vblank
@@ -5461,10 +5446,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state,
- pipe_config);
- else
- intel_update_watermarks(intel_crtc);
+ dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
@@ -5801,8 +5783,10 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (INTEL_INFO(dev_priv)->gen >= 9 ||
- IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
+ return 2 * max_cdclk_freq;
+ else if (INTEL_INFO(dev_priv)->gen >= 9 ||
+ IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
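The new Geminilake branch doubles the ceiling because the GLK display engine can process two pixels per cdclk cycle; together with the 316800 kHz maximum cdclk set in the next hunk, the figures work out as below (mode example illustrative):

/* 2 pixels/clock * 316800 kHz max cdclk = 633600 kHz max dotclock,
 * comfortably above 4K modes with pixel clocks in the mid-500 MHz range.
 */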
@@ -5838,6 +5822,8 @@ static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
max_cdclk = 308571;
dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ dev_priv->max_cdclk_freq = 316800;
} else if (IS_BROXTON(dev_priv)) {
dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev_priv)) {
@@ -5925,6 +5911,26 @@ static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
return dev_priv->cdclk_pll.ref * ratio;
}
+static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk_pll.ref)
+ return 0;
+
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ case 79200:
+ case 158400:
+ case 316800:
+ ratio = 33;
+ break;
+ }
+
+ return dev_priv->cdclk_pll.ref * ratio;
+}
+
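The fixed ratio of 33 only makes sense against the BXT-family 19.2 MHz reference clock; a quick sanity check of the numbers, assuming dev_priv->cdclk_pll.ref == 19200:

/* vco = 19200 kHz * 33 = 633600 kHz
 *
 * cdclk = vco / (2 * cd2x divider), with dividers 1, 2 and 4:
 *	633600 / 2 = 316800 kHz
 *	633600 / 4 = 158400 kHz
 *	633600 / 8 =  79200 kHz
 *
 * The 1.5 divider would yield 211200 kHz, which is not a valid GLK
 * cdclk -- hence the WARN(IS_GEMINILAKE(dev_priv), ...) added to the
 * BXT_CDCLK_CD2X_DIV_SEL_1_5 cases elsewhere in this patch.
 */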
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
@@ -5966,7 +5972,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
u32 val, divider;
int vco, ret;
- vco = bxt_de_pll_vco(dev_priv, cdclk);
+ if (IS_GEMINILAKE(dev_priv))
+ vco = glk_de_pll_vco(dev_priv, cdclk);
+ else
+ vco = bxt_de_pll_vco(dev_priv, cdclk);
DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
@@ -5979,6 +5988,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 3:
+ WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
case 2:
@@ -6088,6 +6098,8 @@ sanitize:
void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
+ int cdclk;
+
bxt_sanitize_cdclk(dev_priv);
if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
@@ -6098,7 +6110,12 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
* - The initial CDCLK needs to be read from VBT.
* Need to make this change once the VBT has the required changes for BXT.
*/
- bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
+ if (IS_GEMINILAKE(dev_priv))
+ cdclk = glk_calc_cdclk(0);
+ else
+ cdclk = bxt_calc_cdclk(0);
+
+ bxt_set_cdclk(dev_priv, cdclk);
}
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
@@ -6513,6 +6530,16 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
return 200000;
}
+static int glk_calc_cdclk(int max_pixclk)
+{
+ if (max_pixclk > 2 * 158400)
+ return 316800;
+ else if (max_pixclk > 2 * 79200)
+ return 158400;
+ else
+ return 79200;
+}
+
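glk_calc_cdclk() mirrors bxt_calc_cdclk() below, with the two-pixels-per-clock factor folded into the thresholds. A worked example with illustrative pixel clocks:

/* max_pixclk = 533250 kHz (a 4K mode): 533250 > 2 * 158400 = 316800,
 * so the function returns 316800 kHz; a 148500 kHz 1080p-class mode
 * fits under 2 * 79200 = 158400 and selects the minimum 79200 kHz.
 */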
static int bxt_calc_cdclk(int max_pixclk)
{
if (max_pixclk > 576000)
@@ -6575,15 +6602,27 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
int max_pixclk = ilk_max_pixel_rate(state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
+ int cdclk;
- intel_state->cdclk = intel_state->dev_cdclk =
- bxt_calc_cdclk(max_pixclk);
+ if (IS_GEMINILAKE(dev_priv))
+ cdclk = glk_calc_cdclk(max_pixclk);
+ else
+ cdclk = bxt_calc_cdclk(max_pixclk);
- if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = bxt_calc_cdclk(0);
+ intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+
+ if (!intel_state->active_crtcs) {
+ if (IS_GEMINILAKE(dev_priv))
+ cdclk = glk_calc_cdclk(0);
+ else
+ cdclk = bxt_calc_cdclk(0);
+
+ intel_state->dev_cdclk = cdclk;
+ }
return 0;
}
@@ -6833,13 +6872,13 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- if (to_intel_plane_state(crtc->primary->state)->base.visible) {
+ if (crtc->primary->state->visible) {
WARN_ON(intel_crtc->flip_work);
intel_pre_disable_primary_noatomic(crtc);
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
- to_intel_plane_state(crtc->primary->state)->base.visible = false;
+ crtc->primary->state->visible = false;
}
state = drm_atomic_state_alloc(crtc->dev);
@@ -7291,6 +7330,7 @@ static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+ WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
@@ -7510,7 +7550,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
vco_table = ctg_vco;
else if (IS_G4X(dev_priv))
vco_table = elk_vco;
- else if (IS_CRESTLINE(dev_priv))
+ else if (IS_I965GM(dev_priv))
vco_table = cl_vco;
else if (IS_PINEVIEW(dev_priv))
vco_table = pnv_vco;
@@ -8122,7 +8162,8 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -8357,7 +8398,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->type = DRM_MODE_TYPE_DRIVER;
mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
- mode->flags |= pipe_config->base.adjusted_mode.flags;
mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
@@ -8696,6 +8736,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
+ fb->dev = dev;
+
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
@@ -8705,8 +8747,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->pixel_format = fourcc;
- fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ fb->format = drm_format_info(fourcc);
if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
@@ -8727,14 +8768,14 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- fb->pixel_format,
+ fb->format->format,
fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), plane, fb->width, fb->height,
- fb->bits_per_pixel, base, fb->pitches[0],
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
plane_config->fb = intel_fb;
@@ -8835,7 +8876,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv)) {
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -8888,9 +8929,8 @@ out:
return ret;
}
-static void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
int i;
u32 val, final;
@@ -8902,7 +8942,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
bool using_ssc_source = false;
/* We need to take the global config into account */
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
has_panel = true;
@@ -9158,10 +9198,9 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
-static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
- bool with_fdi)
+static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+ bool with_spread, bool with_fdi)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t reg, tmp;
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
@@ -9199,9 +9238,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
}
/* Sequence to disable CLKOUT_DP */
-static void lpt_disable_clkout_dp(struct drm_device *dev)
+static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t reg, tmp;
mutex_lock(&dev_priv->sb_lock);
@@ -9286,12 +9324,12 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
#undef BEND_IDX
-static void lpt_init_pch_refclk(struct drm_device *dev)
+static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
bool has_vga = false;
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
@@ -9302,24 +9340,22 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
}
if (has_vga) {
- lpt_bend_clkout_dp(to_i915(dev), 0);
- lpt_enable_clkout_dp(dev, true, true);
+ lpt_bend_clkout_dp(dev_priv, 0);
+ lpt_enable_clkout_dp(dev_priv, true, true);
} else {
- lpt_disable_clkout_dp(dev);
+ lpt_disable_clkout_dp(dev_priv);
}
}
/*
* Initialize reference clocks when the driver loads
*/
-void intel_init_pch_refclk(struct drm_device *dev)
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ironlake_init_pch_refclk(dev);
+ ironlake_init_pch_refclk(dev_priv);
else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev);
+ lpt_init_pch_refclk(dev_priv);
}
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
@@ -9726,6 +9762,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
+ fb->dev = dev;
+
val = I915_READ(PLANE_CTL(pipe, 0));
if (!(val & PLANE_CTL_ENABLE))
goto error;
@@ -9734,8 +9772,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX,
val & PLANE_CTL_ALPHA_MASK);
- fb->pixel_format = fourcc;
- fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ fb->format = drm_format_info(fourcc);
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
@@ -9768,18 +9805,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
val = I915_READ(PLANE_STRIDE(pipe, 0));
stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier,
- fb->pixel_format);
+ fb->format->format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(dev, fb->height,
- fb->pixel_format,
+ fb->format->format,
fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
- fb->bits_per_pixel, base, fb->pitches[0],
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
plane_config->fb = intel_fb;
@@ -9838,6 +9875,8 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
+ fb->dev = dev;
+
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
@@ -9847,8 +9886,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->pixel_format = fourcc;
- fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ fb->format = drm_format_info(fourcc);
base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -9869,14 +9907,14 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- fb->pixel_format,
+ fb->format->format,
fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
- fb->bits_per_pixel, base, fb->pitches[0],
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
plane_config->fb = intel_fb;
@@ -10166,7 +10204,6 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
uint32_t val;
DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -10177,19 +10214,18 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
- lpt_disable_clkout_dp(dev);
+ lpt_disable_clkout_dp(dev_priv);
hsw_disable_lcpll(dev_priv, true, true);
}
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
uint32_t val;
DRM_DEBUG_KMS("Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
- lpt_init_pch_refclk(dev);
+ lpt_init_pch_refclk(dev_priv);
if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -10639,7 +10675,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10684,7 +10720,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
- if (IS_BROXTON(dev_priv) &&
+ if (IS_GEN9_LP(dev_priv) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
WARN_ON(active);
active = true;
@@ -10704,7 +10740,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
if (INTEL_GEN(dev_priv) >= 9) {
- skl_init_scalers(dev_priv, crtc, pipe_config);
+ intel_crtc_init_scalers(crtc, pipe_config);
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
@@ -10885,7 +10921,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev_priv) || IS_I865G(dev_priv))
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
i845_update_cursor(crtc, base, plane_state);
else
i9xx_update_cursor(crtc, base, plane_state);
@@ -10903,11 +10939,11 @@ static bool cursor_size_ok(struct drm_i915_private *dev_priv,
* the precision of the register. Everything else requires
* square cursors, limited to a few power-of-two sizes.
*/
- if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
if ((width & 63) != 0)
return false;
- if (width > (IS_845G(dev_priv) ? 64 : 512))
+ if (width > (IS_I845G(dev_priv) ? 64 : 512))
return false;
if (height > 1023)
@@ -10997,7 +11033,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- obj = i915_gem_object_create(dev,
+ obj = i915_gem_object_create(to_i915(dev),
intel_framebuffer_size_for_mode(mode, bpp));
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -11035,7 +11071,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
fb = &dev_priv->fbdev->fb->base;
if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->bits_per_pixel))
+ fb->format->cpp[0] * 8))
return NULL;
if (obj->base.size < mode->vdisplay * fb->pitches[0])
@@ -11063,7 +11099,7 @@ static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
return PTR_ERR(plane_state);
if (mode)
- drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+ drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
else
hdisplay = vdisplay = 0;
@@ -12138,7 +12174,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -EBUSY;
/* Can't change pixel format via MI display flips. */
- if (fb->pixel_format != crtc->primary->fb->pixel_format)
+ if (fb->format != crtc->primary->fb->format)
return -EINVAL;
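A pointer compare is enough here on the assumption (which holds for drm_format_info() in this series) that format descriptors live in a single const table, so two framebuffers with the same fourcc always reference the same struct drm_format_info:

/* fb->format = drm_format_info(fourcc) everywhere in this patch, and
 * drm_format_info() hands out pointers into one static array, so
 * pointer equality is format equality.
 */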
/*
@@ -12257,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
queue_work(system_unbound_wq, &work->mmio_work);
} else {
- request = i915_gem_request_alloc(engine, engine->last_context);
+ request = i915_gem_request_alloc(engine,
+ dev_priv->kernel_context);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
@@ -12424,7 +12461,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
}
was_visible = old_plane_state->base.visible;
- visible = to_intel_plane_state(plane_state)->base.visible;
+ visible = plane_state->visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -12440,7 +12477,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled)
- to_intel_plane_state(plane_state)->base.visible = visible = false;
+ plane_state->visible = visible = false;
if (!was_visible && !visible)
return 0;
@@ -12786,39 +12823,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
pipe_config->ips_enabled, pipe_config->double_wide);
- if (IS_BROXTON(dev_priv)) {
- DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
- "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- pipe_config->dpll_hw_state.ebb0,
- pipe_config->dpll_hw_state.ebb4,
- pipe_config->dpll_hw_state.pll0,
- pipe_config->dpll_hw_state.pll1,
- pipe_config->dpll_hw_state.pll2,
- pipe_config->dpll_hw_state.pll3,
- pipe_config->dpll_hw_state.pll6,
- pipe_config->dpll_hw_state.pll8,
- pipe_config->dpll_hw_state.pll9,
- pipe_config->dpll_hw_state.pll10,
- pipe_config->dpll_hw_state.pcsdw12);
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- DRM_DEBUG_KMS("dpll_hw_state: "
- "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
- pipe_config->dpll_hw_state.ctrl1,
- pipe_config->dpll_hw_state.cfgcr1,
- pipe_config->dpll_hw_state.cfgcr2);
- } else if (HAS_DDI(dev_priv)) {
- DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- pipe_config->dpll_hw_state.wrpll,
- pipe_config->dpll_hw_state.spll);
- } else {
- DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- pipe_config->dpll_hw_state.dpll,
- pipe_config->dpll_hw_state.dpll_md,
- pipe_config->dpll_hw_state.fp0,
- pipe_config->dpll_hw_state.fp1);
- }
+ intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
DRM_DEBUG_KMS("planes on this crtc\n");
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -12838,7 +12843,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
plane->base.id, plane->name,
fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->pixel_format, &format_name));
+ drm_get_format_name(fb->format->format, &format_name));
if (INTEL_GEN(dev_priv) >= 9)
DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
state->scaler_id,
@@ -12983,7 +12988,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_crtc_get_hv_timing(&pipe_config->base.mode,
+ drm_mode_get_hv_timing(&pipe_config->base.mode,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
@@ -13162,6 +13167,31 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
return false;
}
+static void __printf(3, 4)
+pipe_config_err(bool adjust, const char *name, const char *format, ...)
+{
+ char *level;
+ unsigned int category;
+ struct va_format vaf;
+ va_list args;
+
+ if (adjust) {
+ level = KERN_DEBUG;
+ category = DRM_UT_KMS;
+ } else {
+ level = KERN_ERR;
+ category = DRM_UT_NONE;
+ }
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ drm_printk(level, category, "mismatch in %s %pV", name, &vaf);
+
+ va_end(args);
+}
+
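pipe_config_err() centralizes what the removed INTEL_ERR_OR_DBG_KMS macro open-coded at every call site: while fastset adjustment is in progress (adjust == true) a mismatch is only a KMS debug message, otherwise it is logged as a real error. A call sketch, using one of the fields the PIPE_CONF_CHECK_I macro below actually checks:

pipe_config_err(adjust, "pixel_multiplier",
		"(expected %i, found %i)\n",
		current_config->pixel_multiplier,
		pipe_config->pixel_multiplier);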
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *current_config,
@@ -13170,17 +13200,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
{
bool ret = true;
-#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
- do { \
- if (!adjust) \
- DRM_ERROR(fmt, ##__VA_ARGS__); \
- else \
- DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
- } while (0)
-
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected 0x%08x, found 0x%08x)\n", \
current_config->name, \
pipe_config->name); \
@@ -13189,7 +13211,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_I(name) \
if (current_config->name != pipe_config->name) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
@@ -13198,7 +13220,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_P(name) \
if (current_config->name != pipe_config->name) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected %p, found %p)\n", \
current_config->name, \
pipe_config->name); \
@@ -13209,7 +13231,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
adjust)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
"found tu %i, gmch %i/%i link %i/%i)\n", \
current_config->name.tu, \
@@ -13235,7 +13257,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
&pipe_config->name, adjust) && \
!intel_compare_link_m_n(&current_config->alt_name, \
&pipe_config->name, adjust)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
"or tu %i gmch %i/%i link %i/%i, " \
"found tu %i, gmch %i/%i link %i/%i)\n", \
@@ -13259,8 +13281,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
- "(expected %i, found %i)\n", \
+ pipe_config_err(adjust, __stringify(name), \
+ "(%x) (expected %i, found %i)\n", \
+ (mask), \
current_config->name & (mask), \
pipe_config->name & (mask)); \
ret = false; \
@@ -13268,7 +13291,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
@@ -13385,7 +13408,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
-#undef INTEL_ERR_OR_DBG_KMS
return ret;
}
@@ -13686,9 +13708,9 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
}
if (!crtc) {
- I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
+ I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
"more active pll users than references: %x vs %x\n",
- pll->active_mask, pll->config.crtc_mask);
+ pll->active_mask, pll->state.crtc_mask);
return;
}
@@ -13704,11 +13726,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
pipe_name(drm_crtc_index(crtc)), pll->active_mask);
- I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
+ I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
- crtc_mask, pll->config.crtc_mask);
+ crtc_mask, pll->state.crtc_mask);
- I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
+ I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
&dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
@@ -13734,7 +13756,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
pipe_name(drm_crtc_index(crtc)));
- I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
+ I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
pipe_name(drm_crtc_index(crtc)));
}
@@ -13817,7 +13839,6 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_shared_dpll_config *shared_dpll = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
@@ -13838,10 +13859,7 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
if (!old_dpll)
continue;
- if (!shared_dpll)
- shared_dpll = intel_atomic_get_shared_dpll_state(state);
-
- intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
+ intel_release_shared_dpll(old_dpll, intel_crtc, state);
}
}
@@ -13910,14 +13928,34 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
return 0;
}
+static int intel_lock_all_pipes(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+
+ /* Add all pipes to the state */
+ for_each_crtc(state->dev, crtc) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ return 0;
+}
+
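intel_lock_all_pipes() deliberately stops short of intel_modeset_all_pipes(): it pulls every crtc into the atomic state, which takes the crtc locks, but does not force mode_changed. The distinction matches the two cases wired up in intel_modeset_checks() further down:

/* software-only bookkeeping (dev_priv->atomic_cdclk_freq) just needs
 * every crtc lock			-> intel_lock_all_pipes()
 * reprogramming the real cdclk/PLL needs all pipes switched off,
 * i.e. a modeset on the active ones	-> intel_modeset_all_pipes()
 */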
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int ret = 0;
- /* add all active pipes to the state */
+ /*
+ * Add all pipes to the state, and force
+ * a modeset on all the active ones.
+ */
for_each_crtc(state->dev, crtc) {
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -13929,14 +13967,14 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
- break;
+ return ret;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
static int intel_modeset_checks(struct drm_atomic_state *state)
@@ -13982,12 +14020,24 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
if (ret < 0)
return ret;
+ /*
+ * Writes to dev_priv->atomic_cdclk_freq must be protected by
+ * holding all the crtc locks, even if we don't end up
+ * touching the hardware
+ */
+ if (intel_state->cdclk != dev_priv->atomic_cdclk_freq) {
+ ret = intel_lock_all_pipes(state);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* All pipes must be switched off while we change the cdclk. */
if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
- intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
+ intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) {
ret = intel_modeset_all_pipes(state);
-
- if (ret < 0)
- return ret;
+ if (ret < 0)
+ return ret;
+ }
DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
intel_state->cdclk, intel_state->dev_cdclk);
@@ -14581,7 +14631,7 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
- intel_shared_dpll_commit(state);
+ intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
if (intel_state->modeset) {
@@ -14691,6 +14741,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
+ .set_crc_source = intel_crtc_set_crc_source,
};
/**
@@ -14949,6 +15000,141 @@ const struct drm_plane_funcs intel_plane_funcs = {
.atomic_destroy_state = intel_plane_destroy_state,
};
+static int
+intel_legacy_cursor_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ int ret;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *old_fb;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct i915_vma *old_vma;
+
+ /*
+ * When crtc is inactive or there is a modeset pending,
+ * wait for it to complete in the slowpath
+ */
+ if (!crtc_state->active || needs_modeset(crtc_state) ||
+ to_intel_crtc_state(crtc_state)->update_pipe)
+ goto slow;
+
+ old_plane_state = plane->state;
+
+ /*
+ * If any parameters change that may affect watermarks,
+ * take the slowpath. Only changing fb or position should be
+ * in the fastpath.
+ */
+ if (old_plane_state->crtc != crtc ||
+ old_plane_state->src_w != src_w ||
+ old_plane_state->src_h != src_h ||
+ old_plane_state->crtc_w != crtc_w ||
+ old_plane_state->crtc_h != crtc_h ||
+ !old_plane_state->visible ||
+ old_plane_state->fb->modifier != fb->modifier)
+ goto slow;
+
+ new_plane_state = intel_plane_duplicate_state(plane);
+ if (!new_plane_state)
+ return -ENOMEM;
+
+ drm_atomic_set_fb_for_plane(new_plane_state, fb);
+
+ new_plane_state->src_x = src_x;
+ new_plane_state->src_y = src_y;
+ new_plane_state->src_w = src_w;
+ new_plane_state->src_h = src_h;
+ new_plane_state->crtc_x = crtc_x;
+ new_plane_state->crtc_y = crtc_y;
+ new_plane_state->crtc_w = crtc_w;
+ new_plane_state->crtc_h = crtc_h;
+
+ ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
+ to_intel_plane_state(new_plane_state));
+ if (ret)
+ goto out_free;
+
+ /* Visibility changed, must take slowpath. */
+ if (!new_plane_state->visible)
+ goto slow_free;
+
+ ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ if (ret)
+ goto out_free;
+
+ if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
+
+ ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to attach phys object\n");
+ goto out_unlock;
+ }
+ } else {
+ struct i915_vma *vma;
+
+ vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_KMS("failed to pin object\n");
+
+ ret = PTR_ERR(vma);
+ goto out_unlock;
+ }
+
+ to_intel_plane_state(new_plane_state)->vma = vma;
+ }
+
+ old_fb = old_plane_state->fb;
+ old_vma = to_intel_plane_state(old_plane_state)->vma;
+
+ i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
+ intel_plane->frontbuffer_bit);
+
+ /* Swap plane state */
+ new_plane_state->fence = old_plane_state->fence;
+ *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
+ new_plane_state->fence = NULL;
+ new_plane_state->fb = old_fb;
+ to_intel_plane_state(new_plane_state)->vma = old_vma;
+
+ intel_plane->update_plane(plane,
+ to_intel_crtc_state(crtc->state),
+ to_intel_plane_state(plane->state));
+
+ intel_cleanup_plane_fb(plane, new_plane_state);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+out_free:
+ intel_plane_destroy_state(plane, new_plane_state);
+ return ret;
+
+slow_free:
+ intel_plane_destroy_state(plane, new_plane_state);
+slow:
+ return drm_atomic_helper_update_plane(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
+
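intel_legacy_cursor_update() gives legacy cursor ioctls a fastpath so X-style pointer movement no longer waits on the full atomic commit machinery. The fallback ordering, as implemented above:

/* fastpath preconditions, checked in order:
 *  1. crtc active, no modeset or pipe update pending
 *  2. same crtc, unchanged src/crtc size, fb modifier and visibility
 *  3. intel_plane_atomic_check_with_state() passes, plane stays visible
 * anything else falls back to drm_atomic_helper_update_plane().
 */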
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_legacy_cursor_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+};
+
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -14988,6 +15174,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->plane = (enum plane) !pipe;
else
primary->plane = (enum plane) pipe;
+ primary->id = PLANE_PRIMARY;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
@@ -15187,13 +15374,14 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
+ cursor->id = PLANE_CURSOR;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
cursor->check_plane = intel_check_cursor_plane;
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_plane_funcs,
+ 0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR,
@@ -15221,14 +15409,18 @@ fail:
return ERR_PTR(ret);
}
-static void skl_init_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static void intel_crtc_init_scalers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int i;
+ crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
+ if (!crtc->num_scalers)
+ return;
+
for (i = 0; i < crtc->num_scalers; i++) {
struct intel_scaler *scaler = &scaler_state->scalers[i];
@@ -15260,21 +15452,12 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
- /* initialize shared scalers */
- if (INTEL_GEN(dev_priv) >= 9) {
- if (pipe == PIPE_C)
- intel_crtc->num_scalers = 1;
- else
- intel_crtc->num_scalers = SKL_NUM_SCALERS;
-
- skl_init_scalers(dev_priv, intel_crtc, crtc_state);
- }
-
primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
}
+ intel_crtc->plane_ids_mask |= BIT(primary->id);
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
@@ -15284,6 +15467,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(plane);
goto fail;
}
+ intel_crtc->plane_ids_mask |= BIT(plane->id);
}
cursor = intel_cursor_plane_create(dev_priv, pipe);
@@ -15291,6 +15475,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(cursor);
goto fail;
}
+ intel_crtc->plane_ids_mask |= BIT(cursor->id);
ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
&primary->base, &cursor->base,
@@ -15308,6 +15493,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_crtc->wm.cxsr_allowed = true;
+ /* initialize shared scalers */
+ intel_crtc_init_scalers(intel_crtc, crtc_state);
+
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
@@ -15444,7 +15632,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
+ if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
dev_priv->pps_mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->pps_mmio_base = VLV_PPS_BASE;
@@ -15454,9 +15642,8 @@ static void intel_pps_init(struct drm_i915_private *dev_priv)
intel_pps_unlock_regs_wa(dev_priv);
}
-static void intel_setup_outputs(struct drm_device *dev)
+static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
@@ -15467,22 +15654,22 @@ static void intel_setup_outputs(struct drm_device *dev)
* prevent the registration of both eDP and LVDS and the incorrect
* sharing of the PPS.
*/
- intel_lvds_init(dev);
+ intel_lvds_init(dev_priv);
if (intel_crt_present(dev_priv))
- intel_crt_init(dev);
+ intel_crt_init(dev_priv);
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
* detect the ports.
*/
- intel_ddi_init(dev, PORT_A);
- intel_ddi_init(dev, PORT_B);
- intel_ddi_init(dev, PORT_C);
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
- intel_dsi_init(dev);
+ intel_dsi_init(dev_priv);
} else if (HAS_DDI(dev_priv)) {
int found;
@@ -15494,18 +15681,18 @@ static void intel_setup_outputs(struct drm_device *dev)
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- intel_ddi_init(dev, PORT_A);
+ intel_ddi_init(dev_priv, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
* register */
found = I915_READ(SFUSE_STRAP);
if (found & SFUSE_STRAP_DDIB_DETECTED)
- intel_ddi_init(dev, PORT_B);
+ intel_ddi_init(dev_priv, PORT_B);
if (found & SFUSE_STRAP_DDIC_DETECTED)
- intel_ddi_init(dev, PORT_C);
+ intel_ddi_init(dev_priv, PORT_C);
if (found & SFUSE_STRAP_DDID_DETECTED)
- intel_ddi_init(dev, PORT_D);
+ intel_ddi_init(dev_priv, PORT_D);
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
@@ -15513,35 +15700,35 @@ static void intel_setup_outputs(struct drm_device *dev)
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
- intel_ddi_init(dev, PORT_E);
+ intel_ddi_init(dev_priv, PORT_E);
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
- intel_dp_init(dev, DP_A, PORT_A);
+ intel_dp_init(dev_priv, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
+ found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
if (!found)
- intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
+ intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_B, PORT_B);
+ intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
}
if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
- intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
+ intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, PCH_HDMID, PORT_D);
+ intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_C, PORT_C);
+ intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
if (I915_READ(PCH_DP_D) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_D, PORT_D);
+ intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
@@ -15563,16 +15750,16 @@ static void intel_setup_outputs(struct drm_device *dev)
has_edp = intel_dp_is_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
- has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
- intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
+ intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
- has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
- intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
+ intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
if (IS_CHERRYVIEW(dev_priv)) {
/*
@@ -15581,63 +15768,63 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
- intel_dp_init(dev, CHV_DP_D, PORT_D);
+ intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
+ intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
- intel_dsi_init(dev);
+ intel_dsi_init(dev_priv);
} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
- found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
+ found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
if (!found && IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
- intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
+ intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
}
if (!found && IS_G4X(dev_priv))
- intel_dp_init(dev, DP_B, PORT_B);
+ intel_dp_init(dev_priv, DP_B, PORT_B);
}
/* Before G4X, SDVOC doesn't have its own detect register */
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
- found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
+ found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
}
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
if (IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
- intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
+ intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
}
if (IS_G4X(dev_priv))
- intel_dp_init(dev, DP_C, PORT_C);
+ intel_dp_init(dev_priv, DP_C, PORT_C);
}
if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
- intel_dp_init(dev, DP_D, PORT_D);
+ intel_dp_init(dev_priv, DP_D, PORT_D);
} else if (IS_GEN2(dev_priv))
- intel_dvo_init(dev);
+ intel_dvo_init(dev_priv);
if (SUPPORTS_TV(dev_priv))
- intel_tv_init(dev);
+ intel_tv_init(dev_priv);
- intel_psr_init(dev);
+ intel_psr_init(dev_priv);
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
intel_encoder_clones(encoder);
}
- intel_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev_priv);
- drm_helper_move_panel_connectors_to_head(dev);
+ drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -15874,7 +16061,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &intel_fb->base, mode_cmd);
intel_fb->obj = obj;
ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
@@ -15912,6 +16099,17 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
+static void intel_atomic_state_free(struct drm_atomic_state *state)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+
+ drm_atomic_state_default_release(state);
+
+ i915_sw_fence_fini(&intel_state->commit_ready);
+
+ kfree(state);
+}
+
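A driver-specific atomic_state_free hook is needed because intel_atomic_state embeds an i915_sw_fence (commit_ready) that has to be torn down before the allocation is freed; the core's default would kfree() the state without ever running i915_sw_fence_fini(). Teardown order, as implemented above:

/* drm_atomic_state_default_release()	- core bookkeeping
 * i915_sw_fence_fini(&commit_ready)	- driver-private fence
 * kfree(state)				- the whole intel_atomic_state
 */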
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
@@ -15919,6 +16117,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_commit = intel_atomic_commit,
.atomic_state_alloc = intel_atomic_state_alloc,
.atomic_state_clear = intel_atomic_state_clear,
+ .atomic_state_free = intel_atomic_state_free,
};
/**
@@ -15999,7 +16198,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
dev_priv->display.get_display_clock_speed =
broxton_get_display_clock_speed;
else if (IS_BROADWELL(dev_priv))
@@ -16014,14 +16213,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
else if (IS_GEN5(dev_priv))
dev_priv->display.get_display_clock_speed =
ilk_get_display_clock_speed;
- else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
+ else if (IS_I945G(dev_priv) || IS_I965G(dev_priv) ||
IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_GM45(dev_priv))
dev_priv->display.get_display_clock_speed =
gm45_get_display_clock_speed;
- else if (IS_CRESTLINE(dev_priv))
+ else if (IS_I965GM(dev_priv))
dev_priv->display.get_display_clock_speed =
i965gm_get_display_clock_speed;
else if (IS_PINEVIEW(dev_priv))
@@ -16033,7 +16232,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
else if (IS_I915G(dev_priv))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
+ else if (IS_I945GM(dev_priv) || IS_I845G(dev_priv))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_I915GM(dev_priv))
@@ -16072,7 +16271,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
valleyview_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
valleyview_modeset_calc_cdclk;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
bxt_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
@@ -16470,8 +16669,8 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 8192;
}
- if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
- dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
} else if (IS_GEN2(dev_priv)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
@@ -16508,7 +16707,7 @@ int intel_modeset_init(struct drm_device *dev)
/* Just disable it once at startup */
i915_disable_vga(dev_priv);
- intel_setup_outputs(dev);
+ intel_setup_outputs(dev_priv);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev);
@@ -16661,7 +16860,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* Temporarily change the plane mapping and disable everything
* ... */
plane = crtc->plane;
- to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
+ crtc->base.primary->state->visible = true;
crtc->plane = !plane;
intel_crtc_disable_noatomic(&crtc->base);
crtc->plane = plane;
@@ -16813,7 +17012,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
dev_priv->active_crtcs = 0;
for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state = crtc->config;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16832,34 +17032,41 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc->active));
+ enableddisabled(crtc_state->base.active));
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
pll->on = pll->funcs.get_hw_state(dev_priv, pll,
- &pll->config.hw_state);
- pll->config.crtc_mask = 0;
+ &pll->state.hw_state);
+ pll->state.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
- if (crtc->active && crtc->config->shared_dpll == pll)
- pll->config.crtc_mask |= 1 << crtc->pipe;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->base.active &&
+ crtc_state->shared_dpll == pll)
+ pll->state.crtc_mask |= 1 << crtc->pipe;
}
- pll->active_mask = pll->config.crtc_mask;
+ pll->active_mask = pll->state.crtc_mask;
DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->name, pll->config.crtc_mask, pll->on);
+ pll->name, pll->state.crtc_mask, pll->on);
}
for_each_intel_encoder(dev, encoder) {
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
+ struct intel_crtc_state *crtc_state;
+
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
- crtc->config->output_types |= 1 << encoder->type;
- encoder->get_config(encoder, crtc->config);
+ crtc_state->output_types |= 1 << encoder->type;
+ encoder->get_config(encoder, crtc_state);
} else {
encoder->base.crtc = NULL;
}
@@ -16900,14 +17107,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
int pixclk = 0;
- crtc->base.hwmode = crtc->config->base.adjusted_mode;
+ crtc->base.hwmode = crtc_state->base.adjusted_mode;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
- if (crtc->base.state->active) {
- intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
- intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+ if (crtc_state->base.active) {
+ intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
+ intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
/*
@@ -16915,29 +17124,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* the atomic core happy. It wants a valid mode if the
* crtc's enabled, so we do the above call.
*
- * At this point some state updated by the connectors
- * in their ->detect() callback has not run yet, so
- * no recalculation can be done yet.
- *
- * Even if we could do a recalculation and modeset
- * right now it would cause a double modeset if
- * fbdev or userspace chooses a different initial mode.
- *
- * If that happens, someone indicated they wanted a
- * mode change, which means it's safe to do a full
- * recalculation.
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
*/
- crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+ crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- pixclk = ilk_pipe_pixel_rate(crtc->config);
+ pixclk = ilk_pipe_pixel_rate(crtc_state);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+ pixclk = crtc_state->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixclk = DIV_ROUND_UP(pixclk * 100, 95);
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
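
The 100/95 scaling above encodes the Broadwell IPS constraint that the pixel rate may use at most 95% of cdclk, so the pixel clock is inflated by 100/95 (rounded up) before it feeds the minimum-cdclk bookkeeping. A standalone sketch of the arithmetic, using an illustrative 1080p60 pixel clock rather than anything read out of hardware:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int pixclk = 148500; /* kHz, roughly 1080p60; illustrative */
        int scaled = DIV_ROUND_UP(pixclk * 100, 95);

        /* prints 156316: the cdclk headroom IPS needs on BDW */
        printf("pixclk %d kHz -> %d kHz with IPS headroom\n",
               pixclk, scaled);
        return 0;
}
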
@@ -16946,7 +17147,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
dev_priv->min_pixclk[crtc->pipe] = pixclk;
- intel_pipe_config_sanity_check(dev_priv, crtc->config);
+ intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
}
@@ -17120,7 +17321,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_gt_powersave(dev_priv);
- intel_teardown_gmbus(dev);
+ intel_teardown_gmbus(dev_priv);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0b8e8eb85c19..d1670b8afbf5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -156,38 +156,28 @@ static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
u8 source_max, sink_max;
source_max = intel_dig_port->max_lanes;
- sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+ sink_max = intel_dp->max_sink_lane_count;
return min(source_max, sink_max);
}
-/*
- * The units on the numbers in the next two are... bizarre. Examples will
- * make it clearer; this one parallels an example in the eDP spec.
- *
- * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
- *
- * 270000 * 1 * 8 / 10 == 216000
- *
- * The actual data capacity of that configuration is 2.16Gbit/s, so the
- * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
- * or equivalently, kilopixels per second - so for 1680x1050R it'd be
- * 119000. At 18bpp that's 2142000 kilobits per second.
- *
- * Thus the strange-looking division by 10 in intel_dp_link_required, to
- * get the result in decakilobits instead of kilobits.
- */
-
-static int
+int
intel_dp_link_required(int pixel_clock, int bpp)
{
- return (pixel_clock * bpp + 9) / 10;
+ /* pixel_clock is in kHz; divide the product by 8 to convert bits to bytes */
+ return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
-static int
+int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
- return (max_link_clock * max_lanes * 8) / 10;
+ /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
+ * link rate that is generally expressed in Gbps. Since 8 bits of data
+ * are transmitted every LS_Clk per lane, there is no need to account for
+ * the channel encoding that is done in the PHY layer here.
+ */
+
+ return max_link_clock * max_lanes;
}
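
With the decakilobit comment gone, both helpers now work in kilobytes per second: intel_dp_link_required() is pixel_clock (kHz) times bpp divided by 8, and intel_dp_max_data_rate() is LS_Clk (kHz) times lane count, since each lane carries 8 data bits, one byte, per link symbol clock. A standalone sketch of the comparison with illustrative numbers, 1080p60 at 24 bpp against a 4-lane HBR link whose LS_Clk is 270000 kHz:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int link_required(int pixel_clock, int bpp)
{
        return DIV_ROUND_UP(pixel_clock * bpp, 8);      /* kBps */
}

static int max_data_rate(int max_link_clock, int max_lanes)
{
        return max_link_clock * max_lanes;              /* kBps */
}

int main(void)
{
        int mode_rate = link_required(148500, 24);      /* 445500 */
        int link_rate = max_data_rate(270000, 4);       /* 1080000 */

        printf("mode %d kBps vs link %d kBps: %s\n", mode_rate,
               link_rate, mode_rate <= link_rate ? "fits" : "too big");
        return 0;
}
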
static int
@@ -223,7 +213,7 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
*sink_rates = default_rates;
- return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+ return (intel_dp->max_sink_link_bw >> 3) + 1;
}
static int
@@ -233,7 +223,7 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
int size;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -288,6 +278,44 @@ static int intel_dp_common_rates(struct intel_dp *intel_dp,
common_rates);
}
+static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
+ int *common_rates, int link_rate)
+{
+ int common_len;
+ int index;
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+ for (index = 0; index < common_len; index++) {
+ if (link_rate == common_rates[common_len - index - 1])
+ return common_len - index - 1;
+ }
+
+ return -1;
+}
+
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, uint8_t lane_count)
+{
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ int link_rate_index;
+
+ link_rate_index = intel_dp_link_rate_index(intel_dp,
+ common_rates,
+ link_rate);
+ if (link_rate_index > 0) {
+ intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
+ intel_dp->max_sink_lane_count = lane_count;
+ } else if (lane_count > 1) {
+ intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+ intel_dp->max_sink_lane_count = lane_count >> 1;
+ } else {
+ DRM_ERROR("Link Training Unsuccessful\n");
+ return -1;
+ }
+
+ return 0;
+}
+
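
The fallback helper above builds a retry ladder: after a failed link training it first steps down to the next lower rate in the common (source and sink) rate table at the same lane count; only once the lowest rate has failed does it halve the lane count and start over from the maximum bandwidth. A standalone sketch of the order that produces, assuming a hypothetical sink advertising RBR/HBR/HBR2 on 4 lanes:

#include <stdio.h>

int main(void)
{
        /* common link rates, highest first, as LS_Clk in kHz */
        int rates[] = { 540000, 270000, 162000 }; /* HBR2, HBR, RBR */

        for (int lanes = 4; lanes >= 1; lanes >>= 1)
                for (int i = 0; i < 3; i++)
                        printf("try %d lane(s) at LS_Clk %d kHz\n",
                               lanes, rates[i]);
        return 0;
}

Once the 1-lane/RBR attempt fails too, the function above returns -1 and logs the "Link Training Unsuccessful" error.
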
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -465,14 +493,50 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
}
}
+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+ /*
+ * We don't have a power sequencer currently.
+ * Pick one that's not used by other ports.
+ */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
+
+ if (encoder->type != INTEL_OUTPUT_DP &&
+ encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe != intel_dp->pps_pipe);
+
+ if (intel_dp->pps_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps_pipe);
+ } else {
+ WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
+
+ if (intel_dp->active_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->active_pipe);
+ }
+ }
+
+ if (pipes == 0)
+ return INVALID_PIPE;
+
+ return ffs(pipes) - 1;
+}
+
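
The helper expresses the search as a two-bit mask: start with both candidate pipes set, clear each pipe whose power sequencer (or active (e)DP pipe) is already taken, and let ffs() pick the lowest bit still standing. A minimal sketch of the idiom outside the driver:

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
        unsigned int pipes = (1 << 0) | (1 << 1); /* PIPE_A | PIPE_B */

        pipes &= ~(1 << 0); /* pretend pipe A's PPS is in use */

        if (pipes == 0)
                printf("no free power sequencer\n");
        else /* ffs() is 1-based, hence the -1; prints 1 (PIPE_B) */
                printf("free pipe index: %d\n", ffs(pipes) - 1);
        return 0;
}
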
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
- unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -480,33 +544,20 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
/* We should never land here with regular DP ports */
WARN_ON(!is_edp(intel_dp));
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe != intel_dp->pps_pipe);
+
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
- /*
- * We don't have power sequencer currently.
- * Pick one that's not used by other ports.
- */
- for_each_intel_encoder(dev, encoder) {
- struct intel_dp *tmp;
-
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- tmp = enc_to_intel_dp(&encoder->base);
-
- if (tmp->pps_pipe != INVALID_PIPE)
- pipes &= ~(1 << tmp->pps_pipe);
- }
+ pipe = vlv_find_free_pps(dev_priv);
/*
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (WARN_ON(pipes == 0))
+ if (WARN_ON(pipe == INVALID_PIPE))
pipe = PIPE_A;
- else
- pipe = ffs(pipes) - 1;
vlv_steal_power_sequencer(dev, pipe);
intel_dp->pps_pipe = pipe;
@@ -646,7 +697,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv)))
+ !IS_GEN9_LP(dev_priv)))
return;
/*
@@ -662,11 +713,18 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
for_each_intel_encoder(dev, encoder) {
struct intel_dp *intel_dp;
- if (encoder->type != INTEL_OUTPUT_EDP)
+ if (encoder->type != INTEL_OUTPUT_DP &&
+ encoder->type != INTEL_OUTPUT_EDP)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- if (IS_BROXTON(dev_priv))
+
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ if (IS_GEN9_LP(dev_priv))
intel_dp->pps_reset = true;
else
intel_dp->pps_pipe = INVALID_PIPE;
@@ -689,7 +747,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
memset(regs, 0, sizeof(*regs));
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
pps_idx = bxt_power_sequencer_idx(intel_dp);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pps_idx = vlv_power_sequencer_pipe(intel_dp);
@@ -698,7 +756,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
- if (!IS_BROXTON(dev_priv))
+ if (!IS_GEN9_LP(dev_priv))
regs->pp_div = PP_DIVISOR(pps_idx);
}
@@ -1655,7 +1713,9 @@ found:
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
pipe_config->limited_color_range =
- bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
+ bpp != 18 &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
pipe_config->limited_color_range =
intel_dp->limited_color_range;
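
The old drm_match_cea_mode(adjusted_mode) > 1 test open-coded what drm_default_rgb_quant_range() answers directly: under CEA-861, CEA modes default to limited-range RGB, with the sole exception of VIC 1 (640x480), which is full range, as are non-CEA modes. A simplified standalone sketch of that default; the real helper takes a drm_display_mode, so the VIC-keyed stand-in here is an assumption for illustration:

#include <stdio.h>

enum range { RANGE_FULL, RANGE_LIMITED };

/* vic == 0 means "not a CEA mode" */
static enum range default_rgb_quant_range(int vic)
{
        return vic > 1 ? RANGE_LIMITED : RANGE_FULL;
}

int main(void)
{
        printf("VIC 16 (1080p60): %s\n",
               default_rgb_quant_range(16) == RANGE_LIMITED ?
               "limited" : "full");
        printf("VIC 1 (640x480): %s\n",
               default_rgb_quant_range(1) == RANGE_LIMITED ?
               "limited" : "full");
        return 0;
}
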
@@ -2402,6 +2462,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D3);
} else {
+ struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+
/*
* When turning on, we need to retry for 1ms to give the sink
* time to wake up.
@@ -2413,6 +2475,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
break;
msleep(1);
}
+
+ if (ret == 1 && lspcon->active)
+ lspcon_wait_pcon_mode(lspcon);
}
if (ret != 1)
@@ -2820,6 +2885,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
edp_panel_vdd_off_sync(intel_dp);
/*
@@ -2847,29 +2917,27 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
lockdep_assert_held(&dev_priv->pps_mutex);
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
- return;
-
for_each_intel_encoder(dev, encoder) {
struct intel_dp *intel_dp;
enum port port;
- if (encoder->type != INTEL_OUTPUT_EDP)
+ if (encoder->type != INTEL_OUTPUT_DP &&
+ encoder->type != INTEL_OUTPUT_EDP)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
port = dp_to_dig_port(intel_dp)->port;
+ WARN(intel_dp->active_pipe == pipe,
+ "stealing pipe %c power sequencer from active (e)DP port %c\n",
+ pipe_name(pipe), port_name(port));
+
if (intel_dp->pps_pipe != pipe)
continue;
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
- WARN(encoder->base.crtc,
- "stealing pipe %c power sequencer from active eDP port %c\n",
- pipe_name(pipe), port_name(port));
-
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
}
@@ -2885,19 +2953,17 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
- return;
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
- if (intel_dp->pps_pipe == crtc->pipe)
- return;
-
- /*
- * If another power sequencer was being used on this
- * port previously make sure to turn off vdd there while
- * we still have control of it.
- */
- if (intel_dp->pps_pipe != INVALID_PIPE)
+ if (intel_dp->pps_pipe != INVALID_PIPE &&
+ intel_dp->pps_pipe != crtc->pipe) {
+ /*
+ * If another power sequencer was being used on this
+ * port previously make sure to turn off vdd there while
+ * we still have control of it.
+ */
vlv_detach_power_sequencer(intel_dp);
+ }
/*
* We may be stealing the power
@@ -2905,6 +2971,11 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
*/
vlv_steal_power_sequencer(dev, crtc->pipe);
+ intel_dp->active_pipe = crtc->pipe;
+
+ if (!is_edp(intel_dp))
+ return;
+
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
@@ -2973,6 +3044,32 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
+static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+{
+ uint8_t psr_caps = 0;
+
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
+ return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+}
+
+static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+ uint8_t dprx = 0;
+
+ drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx);
+ return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+ uint8_t alpm_caps = 0;
+
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
+ return alpm_caps & DP_ALPM_CAP;
+}
+
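
The three helpers above share one shape: read a single DPCD byte and test a capability bit, with the local initialized to zero so a failed AUX read reports the capability as absent. A minimal sketch of that pattern with a stubbed read; dpcd_readb() here is a stand-in, not the drm_dp_dpcd_readb() helper:

#include <stdio.h>
#include <stdint.h>

#define CAP_BIT (1 << 0) /* illustrative capability bit */

/* stand-in that pretends the sink sets the bit */
static int dpcd_readb(unsigned int addr, uint8_t *val)
{
        (void)addr;
        *val = CAP_BIT;
        return 1; /* bytes read */
}

static int sink_has_cap(unsigned int addr)
{
        uint8_t v = 0; /* stays 0 on a failed read: no capability */

        dpcd_readb(addr, &v);
        return (v & CAP_BIT) != 0;
}

int main(void)
{
        printf("capability: %s\n",
               sink_has_cap(0x2e) ? "supported" : "not supported");
        return 0;
}
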
/* These are source-specific values. */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
@@ -2980,7 +3077,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_GEN(dev_priv) >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
@@ -3343,7 +3440,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
@@ -3491,6 +3588,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(intel_dp->panel_power_down_delay);
intel_dp->DP = DP;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ pps_lock(intel_dp);
+ intel_dp->active_pipe = INVALID_PIPE;
+ pps_unlock(intel_dp);
+ }
}
bool
@@ -3545,6 +3648,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
DRM_DEBUG_KMS("PSR2 %s on sink",
dev_priv->psr.psr2_support ? "supported" : "not supported");
+
+ if (dev_priv->psr.psr2_support) {
+ dev_priv->psr.y_cord_support =
+ intel_dp_get_y_cord_status(intel_dp);
+ dev_priv->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ dev_priv->psr.alpm =
+ intel_dp_get_alpm_status(intel_dp);
+ }
+
}
/* Read the eDP Display control capabilities registers */
@@ -3569,7 +3682,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (val == 0)
break;
- /* Value read is in kHz while drm clock is saved in deca-kHz */
+ /* Value read multiplied by 200kHz gives the per-lane
+ * link rate in kHz. The source rates are, however,
+ * stored in terms of LS_Clk kHz. The full conversion
+ * back to symbols is
+ * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
+ */
intel_dp->sink_rates[i] = (val * 200) / 10;
}
intel_dp->num_sink_rates = i;
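
The replacement comment spells out the unit chain: the eDP DPCD rate table stores the per-lane bit rate in 200 kHz units, while the driver keeps rates as LS_Clk in kHz, and the 8/10 channel-encoding factor times the 1/8 bit-to-byte factor collapses into the single division by 10. A worked standalone example for a hypothetical HBR2 table entry:

#include <stdio.h>

int main(void)
{
        unsigned int val = 27000; /* DPCD entry, units of 200 kHz */
        unsigned int bit_rate = val * 200; /* 5400000 kHz = 5.4 Gbps */
        unsigned int ls_clk = (val * 200) / 10; /* 540000 kHz LS_Clk */

        printf("DPCD %u -> %u kHz bit rate -> %u kHz LS_Clk\n",
               val, bit_rate, ls_clk);
        return 0;
}
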
@@ -3835,7 +3953,7 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
intel_dp->aux.i2c_nack_count,
intel_dp->aux.i2c_defer_count);
- intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
struct edid *block = intel_connector->detect_edid;
@@ -3851,11 +3969,11 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
- intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_STANDARD;
}
/* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance_test_active = 1;
+ intel_dp->compliance.test_active = 1;
return test_result;
}
@@ -3881,22 +3999,22 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
switch (rxdata) {
case DP_TEST_LINK_TRAINING:
DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
- intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
+ intel_dp->compliance.test_type = DP_TEST_LINK_TRAINING;
response = intel_dp_autotest_link_training(intel_dp);
break;
case DP_TEST_LINK_VIDEO_PATTERN:
DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
- intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
+ intel_dp->compliance.test_type = DP_TEST_LINK_VIDEO_PATTERN;
response = intel_dp_autotest_video_pattern(intel_dp);
break;
case DP_TEST_LINK_EDID_READ:
DRM_DEBUG_KMS("EDID test requested\n");
- intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
+ intel_dp->compliance.test_type = DP_TEST_LINK_EDID_READ;
response = intel_dp_autotest_edid(intel_dp);
break;
case DP_TEST_LINK_PHY_TEST_PATTERN:
DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
- intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
+ intel_dp->compliance.test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
response = intel_dp_autotest_phy_pattern(intel_dp);
break;
default:
@@ -4020,7 +4138,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
/* if link training is requested we should perform it always */
- if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
+ if ((intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
@@ -4054,9 +4172,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
* Clearing compliance test variables to allow capturing
* of values for next automated test request.
*/
- intel_dp->compliance_test_active = 0;
- intel_dp->compliance_test_type = 0;
- intel_dp->compliance_test_data = 0;
+ memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
/*
* Now read the DPCD to see if it's actually running
@@ -4148,9 +4264,10 @@ static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_connector_status status;
- status = intel_panel_detect(dev);
+ status = intel_panel_detect(dev_priv);
if (status == connector_status_unknown)
status = connector_status_connected;
@@ -4289,14 +4406,14 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
*
* Return %true if @port is connected, %false otherwise.
*/
-static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
{
if (HAS_PCH_IBX(dev_priv))
return ibx_digital_port_connected(dev_priv, port);
else if (HAS_PCH_SPLIT(dev_priv))
return cpt_digital_port_connected(dev_priv, port);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
else if (IS_GM45(dev_priv))
return gm45_digital_port_connected(dev_priv, port);
@@ -4373,9 +4490,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
status = connector_status_disconnected;
if (status == connector_status_disconnected) {
- intel_dp->compliance_test_active = 0;
- intel_dp->compliance_test_type = 0;
- intel_dp->compliance_test_data = 0;
+ memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -4396,6 +4511,12 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
yesno(intel_dp_source_supports_hbr2(intel_dp)),
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
+ /* Set the max lane count for sink */
+ intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ /* Set the max link BW for sink */
+ intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+
intel_dp_print_rates(intel_dp);
intel_dp_read_desc(intel_dp);
@@ -4751,27 +4872,41 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
+static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+
+ if ((intel_dp->DP & DP_PORT_EN) == 0)
+ return INVALID_PIPE;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
+ else
+ return PORT_TO_PIPE(intel_dp->DP);
+}
+
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
- if (IS_GEN9(dev_priv) && lspcon->active)
+ if (lspcon->active)
lspcon_resume(lspcon);
- if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
- return;
-
pps_lock(intel_dp);
- /* Reinit the power sequencer, in case BIOS did something with it. */
- intel_dp_pps_init(encoder->dev, intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+
+ if (is_edp(intel_dp)) {
+ /* Reinit the power sequencer, in case BIOS did something with it. */
+ intel_dp_pps_init(encoder->dev, intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ }
pps_unlock(intel_dp);
}
@@ -4879,7 +5014,7 @@ bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
if (INTEL_GEN(dev_priv) < 5)
return false;
- if (port == PORT_A)
+ if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
return true;
return intel_bios_is_port_edp(dev_priv, port);
@@ -4926,7 +5061,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
- if (!IS_BROXTON(dev_priv)) {
+ if (!IS_GEN9_LP(dev_priv)) {
I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_div = I915_READ(regs.pp_div);
}
@@ -4944,7 +5079,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT;
if (tmp > 0)
@@ -5101,7 +5236,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5127,7 +5262,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@@ -5135,7 +5270,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- IS_BROXTON(dev_priv) ?
+ IS_GEN9_LP(dev_priv) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
@@ -5515,7 +5650,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
}
downclock_mode = intel_find_panel_downclock
- (dev, fixed_mode, connector);
+ (dev_priv, fixed_mode, connector);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
@@ -5624,10 +5759,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
- if (IS_CHERRYVIEW(dev_priv))
- pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
- else
- pipe = PORT_TO_PIPE(intel_dp->DP);
+ pipe = vlv_active_pipe(intel_dp);
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = intel_dp->pps_pipe;
@@ -5676,6 +5808,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return false;
intel_dp->pps_pipe = INVALID_PIPE;
+ intel_dp->active_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
if (INTEL_GEN(dev_priv) >= 9)
@@ -5704,6 +5837,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
type = DRM_MODE_CONNECTOR_DisplayPort;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
* for DP the encoder type can be set by the caller to
@@ -5793,11 +5929,10 @@ fail:
return false;
}
-bool intel_dp_init(struct drm_device *dev,
+bool intel_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg,
enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -5814,8 +5949,9 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
- if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
+ if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "DP %c", port_name(port)))
goto err_encoder_init;
intel_encoder->compute_config = intel_dp_compute_config;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index b029d1026a28..38e3ca2f6f18 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -37,6 +37,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
struct drm_atomic_state *state;
int bpp;
int lane_count, slots;
@@ -58,6 +60,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
state = pipe_config->base.state;
+ if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port))
+ pipe_config->has_audio = true;
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
@@ -83,6 +87,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -93,6 +98,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
}
+ if (old_crtc_state->has_audio) {
+ intel_audio_codec_disable(encoder);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ }
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@@ -205,6 +214,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ if (pipe_config->has_audio) {
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+ }
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -227,6 +240,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
+ pipe_config->has_audio =
+ intel_ddi_is_audio_enabled(dev_priv, crtc);
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -334,7 +350,17 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int bpp = 24; /* MST uses fixed bpp */
+ int max_rate, mode_rate, max_lanes, max_link_clock;
+
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
+ max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ mode_rate = intel_dp_link_required(mode->clock, bpp);
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
@@ -343,7 +369,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
- if (mode->clock > max_dotclk)
+ if (mode_rate > max_rate || mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
return MODE_OK;
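
Besides the dotclock ceiling, MST mode_valid now also rejects modes whose payload would not fit the link: at the fixed 24 bpp, clock times 24 over 8 kBps must stay within LS_Clk times lane count kBps. A standalone sketch comparing an illustrative 4K60 mode (594000 kHz) against 4-lane HBR and HBR2 links:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static const char *verdict(int mode_rate, int max_rate)
{
        return mode_rate > max_rate ? "MODE_CLOCK_HIGH" : "MODE_OK";
}

int main(void)
{
        int mode_rate = DIV_ROUND_UP(594000 * 24, 8); /* 1782000 kBps */

        printf("HBR x4: %s\n", verdict(mode_rate, 270000 * 4));
        printf("HBR2 x4: %s\n", verdict(mode_rate, 540000 * 4));
        return 0;
}

With these numbers the 4K60 mode overruns a 4-lane HBR link (1782000 vs 1080000 kBps) but fits 4-lane HBR2 (2160000 kBps).
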
@@ -561,7 +587,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
+ &intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->can_mst = false;
return ret;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 7a8e82dabbf2..09b670929786 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -131,6 +131,18 @@ struct bxt_ddi_phy_info {
enum dpio_phy rcomp_phy;
/**
+ * @reset_delay: delay in us to wait before setting the common reset
+ * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
+ */
+ int reset_delay;
+
+ /**
+ * @pwron_mask: Mask with the appropriate bit set that would cause the
+ * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
+ */
+ u32 pwron_mask;
+
+ /**
* @channel: struct containing per channel information.
*/
struct {
@@ -145,6 +157,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = true,
.rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(0),
.channel = {
[DPIO_CH0] = { .port = PORT_B },
@@ -154,6 +167,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
[DPIO_PHY1] = {
.dual_channel = false,
.rcomp_phy = -1,
+ .pwron_mask = BIT(1),
.channel = {
[DPIO_CH0] = { .port = PORT_A },
@@ -161,20 +175,77 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
},
};
+static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
+ [DPIO_PHY0] = {
+ .dual_channel = false,
+ .rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(0),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_B },
+ }
+ },
+ [DPIO_PHY1] = {
+ .dual_channel = false,
+ .rcomp_phy = -1,
+ .pwron_mask = BIT(3),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_A },
+ }
+ },
+ [DPIO_PHY2] = {
+ .dual_channel = false,
+ .rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(1),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_C },
+ }
+ },
+};
+
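
Geminilake splits its display I/O across three single-channel PHYs (ports B, A and C) where Broxton used a dual-channel PHY0 plus PHY1. PHY1 still owns the RCOMP resistor, now feeding calibration to both PHY0 and PHY2, each GLK PHY has its own punit power-on bit, and all of them want 20 us of delay before common reset is released. A simplified standalone sketch of the table-per-platform dispatch; the types and the is_glk flag are stand-ins:

#include <stdio.h>

struct phy_info {
        int rcomp_phy; /* -1: owns the RCOMP resistor itself */
        unsigned int pwron_mask;
        int reset_delay; /* us */
};

static const struct phy_info bxt_phys[] = {
        {  1, 1 << 0, 0 }, /* PHY0, calibrated from PHY1 */
        { -1, 1 << 1, 0 }, /* PHY1 */
};

static const struct phy_info glk_phys[] = {
        {  1, 1 << 0, 20 }, /* PHY0 */
        { -1, 1 << 3, 20 }, /* PHY1 */
        {  1, 1 << 1, 20 }, /* PHY2, also calibrated from PHY1 */
};

static const struct phy_info *get_phy_list(int is_glk, int *count)
{
        *count = is_glk ? 3 : 2;
        return is_glk ? glk_phys : bxt_phys;
}

int main(void)
{
        int count;
        const struct phy_info *phys = get_phy_list(1, &count);

        for (int i = 0; i < count; i++)
                printf("PHY%d: pwron mask 0x%x, reset delay %d us\n",
                       i, phys[i].pwron_mask, phys[i].reset_delay);
        return 0;
}
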
static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
{
return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
BIT(phy_info->channel[DPIO_CH0].port);
}
-void bxt_port_to_phy_channel(enum port port,
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
+{
+ if (IS_GEMINILAKE(dev_priv)) {
+ *count = ARRAY_SIZE(glk_ddi_phy_info);
+ return glk_ddi_phy_info;
+ } else {
+ *count = ARRAY_SIZE(bxt_ddi_phy_info);
+ return bxt_ddi_phy_info;
+ }
+}
+
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ int count;
+ const struct bxt_ddi_phy_info *phy_list =
+ bxt_get_phy_list(dev_priv, &count);
+
+ return &phy_list[phy];
+}
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
{
- const struct bxt_ddi_phy_info *phy_info;
- int i;
+ const struct bxt_ddi_phy_info *phy_info, *phys;
+ int i, count;
- for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
- phy_info = &bxt_ddi_phy_info[i];
+ phys = bxt_get_phy_list(dev_priv, &count);
+
+ for (i = 0; i < count; i++) {
+ phy_info = &phys[i];
if (port == phy_info->channel[DPIO_CH0].port) {
*phy = i;
@@ -203,7 +274,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
enum dpio_phy phy;
enum dpio_channel ch;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/*
* While we write to the group register to program all lanes at once we
@@ -241,10 +312,12 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ const struct bxt_ddi_phy_info *phy_info;
enum port port;
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
+ if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
return false;
if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
@@ -255,14 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
return false;
}
- if (phy_info->rcomp_phy == -1 &&
- !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
- phy);
-
- return false;
- }
-
if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
phy);
@@ -306,9 +371,11 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ const struct bxt_ddi_phy_info *phy_info;
u32 val;
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
@@ -317,7 +384,6 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
-
return;
}
@@ -326,7 +392,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
}
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val |= GT_DISPLAY_POWER_ON(phy);
+ val |= phy_info->pwron_mask;
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
/*
@@ -367,6 +433,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
if (phy_info->rcomp_phy != -1) {
uint32_t grc_code;
+
+ bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
+
/*
* PHY0 isn't connected to an RCOMP resistor so copy over
* the corresponding calibrated value from PHY1, and disable
@@ -384,31 +453,34 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_PORT_REF_DW8(phy), val);
}
+ if (phy_info->reset_delay)
+ udelay(phy_info->reset_delay);
+
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- if (phy_info->rcomp_phy == -1)
- bxt_phy_wait_grc_done(dev_priv, phy);
-
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
+ const struct bxt_ddi_phy_info *phy_info;
uint32_t val;
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val &= ~GT_DISPLAY_POWER_ON(phy);
+ val &= ~phy_info->pwron_mask;
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ const struct bxt_ddi_phy_info *phy_info =
+ bxt_get_phy_info(dev_priv, phy);
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
@@ -461,10 +533,12 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ const struct bxt_ddi_phy_info *phy_info;
uint32_t mask;
bool ok;
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
#define _CHK(reg, mask, exp, fmt, ...) \
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
@@ -540,7 +614,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
enum dpio_channel ch;
int lane;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
@@ -568,7 +642,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
int lane;
uint8_t mask;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
mask = 0;
for (lane = 0; lane < 4; lane++) {
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a2f0e070d38d..e59e43a9f3a6 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -23,6 +23,25 @@
#include "intel_drv.h"
+/**
+ * DOC: Display PLLs
+ *
+ * Display PLLs used for driving outputs vary by platform. While some have
+ * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
+ * from a pool. In the latter scenario, it is possible that multiple pipes
+ * share a PLL if their configurations match.
+ *
+ * This file provides an abstraction over display PLLs. The function
+ * intel_shared_dpll_init() initializes the PLLs for the given platform. The
+ * users of a PLL are tracked and that tracking is integrated with the atomic
+ * modeset interface. During an atomic operation, a PLL can be requested for a
+ * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
+ * a previously used PLL can be released with intel_release_shared_dpll().
+ * Changes to the users are first staged in the atomic state, and then made
+ * effective by calling intel_shared_dpll_swap_state() during the atomic
+ * commit phase.
+ */
+
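
The check/commit split described in the comment reduces to a copy, mutate, swap pattern: the atomic state receives a duplicate of every PLL's user tracking on first access, the check phase edits only that duplicate, and intel_shared_dpll_swap_state() exchanges it with the live state during commit. A minimal standalone sketch of that lifecycle, with all driver types simplified away:

#include <stdio.h>

struct dpll_state {
        unsigned int crtc_mask; /* which pipes use this PLL */
};

int main(void)
{
        struct dpll_state live = { .crtc_mask = 0x1 }; /* pipe A */
        struct dpll_state staged = live; /* duplicate on first access */
        struct dpll_state tmp;

        staged.crtc_mask |= 0x2; /* check phase: stage pipe B */

        /* commit: a complete swap, as the kerneldoc later in this
         * file describes */
        tmp = live;
        live = staged;
        staged = tmp;

        printf("live crtc_mask now 0x%x\n", live.crtc_mask); /* 0x3 */
        return 0;
}
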
struct intel_shared_dpll *
skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
{
@@ -38,11 +57,11 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
- if (pll->config.crtc_mask == 0)
+ if (pll->state.crtc_mask == 0)
continue;
- if (memcmp(&dpll_hw_state, &pll->config.hw_state,
- sizeof(pll->config.hw_state)) == 0) {
+ if (memcmp(&dpll_hw_state, &pll->state.hw_state,
+ sizeof(pll->state.hw_state)) == 0) {
found = true;
break;
}
@@ -52,8 +71,8 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
for (i = DPLL_ID_SKL_DPLL1;
((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) {
pll = &dev_priv->shared_dplls[i];
- if (pll->config.crtc_mask == 0) {
- pll->config.hw_state = dpll_hw_state;
+ if (pll->state.crtc_mask == 0) {
+ pll->state.hw_state = dpll_hw_state;
break;
}
}
@@ -61,6 +80,45 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
return pll;
}
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll_state *shared_dpll)
+{
+ enum intel_dpll_id i;
+
+ /* Copy shared dpll state */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ shared_dpll[i] = pll->state;
+ }
+}
+
+static struct intel_shared_dpll_state *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+ WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+ if (!state->dpll_set) {
+ state->dpll_set = true;
+
+ intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+ state->shared_dpll);
+ }
+
+ return state->shared_dpll;
+}
+
+/**
+ * intel_get_shared_dpll_by_id - get a DPLL given its id
+ * @dev_priv: i915 device instance
+ * @id: pll id
+ *
+ * Returns:
+ * A pointer to the DPLL with @id
+ */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
@@ -68,6 +126,14 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
return &dev_priv->shared_dplls[id];
}
+/**
+ * intel_get_shared_dpll_id - get the id of a DPLL
+ * @dev_priv: i915 device instance
+ * @pll: the DPLL
+ *
+ * Returns:
+ * The id of @pll
+ */
enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
@@ -79,28 +145,6 @@ intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
}
-void
-intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
-
- config[id].crtc_mask |= 1 << crtc->pipe;
-}
-
-void
-intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
-
- config[id].crtc_mask &= ~(1 << crtc->pipe);
-}
-
/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
@@ -118,6 +162,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
pll->name, onoff(state), onoff(cur_state));
}
+/**
+ * intel_prepare_shared_dpll - call a dpll's prepare hook
+ * @crtc: CRTC which has a shared dpll
+ *
+ * This calls the PLL's prepare hook if it has one and if the PLL is not
+ * already enabled. The prepare hook is platform specific.
+ */
void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -128,24 +179,22 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
return;
mutex_lock(&dev_priv->dpll_lock);
- WARN_ON(!pll->config.crtc_mask);
+ WARN_ON(!pll->state.crtc_mask);
if (!pll->active_mask) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
- pll->funcs.mode_set(dev_priv, pll);
+ pll->funcs.prepare(dev_priv, pll);
}
mutex_unlock(&dev_priv->dpll_lock);
}
/**
- * intel_enable_shared_dpll - enable PCH PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
+ * intel_enable_shared_dpll - enable a CRTC's shared DPLL
+ * @crtc: CRTC which has a shared DPLL
*
- * The PCH PLL needs to be enabled before the PCH transcoder, since it
- * drives the transcoder clock.
+ * Enable the shared DPLL used by @crtc.
*/
void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
@@ -161,7 +210,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
mutex_lock(&dev_priv->dpll_lock);
old_mask = pll->active_mask;
- if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
+ if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
WARN_ON(pll->active_mask & crtc_mask))
goto out;
@@ -186,6 +235,12 @@ out:
mutex_unlock(&dev_priv->dpll_lock);
}
+/**
+ * intel_disable_shared_dpll - disable a CRTC's shared DPLL
+ * @crtc: CRTC which has a shared DPLL
+ *
+ * Disable the shared DPLL used by @crtc.
+ */
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -230,7 +285,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
- struct intel_shared_dpll_config *shared_dpll;
+ struct intel_shared_dpll_state *shared_dpll;
enum intel_dpll_id i;
shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
@@ -270,7 +325,7 @@ static void
intel_reference_shared_dpll(struct intel_shared_dpll *pll,
struct intel_crtc_state *crtc_state)
{
- struct intel_shared_dpll_config *shared_dpll;
+ struct intel_shared_dpll_state *shared_dpll;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum intel_dpll_id i = pll->id;
@@ -284,13 +339,24 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- intel_shared_dpll_config_get(shared_dpll, pll, crtc);
+ shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
}
-void intel_shared_dpll_commit(struct drm_atomic_state *state)
+/**
+ * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
+ * @state: atomic state
+ *
+ * This is the dpll version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * For consistency with atomic helpers this function does a complete swap,
+ * i.e. it also puts the current state into @state, even though there is no
+ * need for that at this moment.
+ */
+void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_shared_dpll_config *shared_dpll;
+ struct intel_shared_dpll_state *shared_dpll;
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
@@ -299,8 +365,13 @@ void intel_shared_dpll_commit(struct drm_atomic_state *state)
shared_dpll = to_intel_atomic_state(state)->shared_dpll;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll_state tmp;
+
pll = &dev_priv->shared_dplls[i];
- pll->config = shared_dpll[i];
+
+ tmp = pll->state;
+ pll->state = shared_dpll[i];
+ shared_dpll[i] = tmp;
}
}
@@ -323,11 +394,11 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
return val & DPLL_VCO_ENABLE;
}
-static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
{
- I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
- I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
+ I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
}
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -349,7 +420,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pll->id));
@@ -360,7 +431,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -412,8 +483,19 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
+static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
+}
+
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
- .mode_set = ibx_pch_dpll_mode_set,
+ .prepare = ibx_pch_dpll_prepare,
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
.get_hw_state = ibx_pch_dpll_get_hw_state,
@@ -422,7 +504,7 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
+ I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
@@ -430,7 +512,7 @@ static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+ I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
POSTING_READ(SPLL_CTL);
udelay(20);
}
@@ -798,6 +880,13 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
+static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
+}
+
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
@@ -873,7 +962,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
DPLL_CTRL1_LINK_RATE_MASK(pll->id));
- val |= pll->config.hw_state.ctrl1 << (pll->id * 6);
+ val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
@@ -886,8 +975,8 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
skl_ddi_pll_write_ctrl1(dev_priv, pll);
- I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
- I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+ I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
+ I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
POSTING_READ(regs[pll->id].cfgcr1);
POSTING_READ(regs[pll->id].cfgcr2);
@@ -1353,6 +1442,16 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
+static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: "
+ "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+ hw_state->ctrl1,
+ hw_state->cfgcr1,
+ hw_state->cfgcr2);
+}
+
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
@@ -1373,13 +1472,23 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
enum dpio_phy phy;
enum dpio_channel ch;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_REF_SEL;
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_POWER_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+ if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ DRM_ERROR("Power state not set for PLL:%d\n", port);
+ }
+
/* Disable 10 bit clock */
temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
@@ -1388,31 +1497,31 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
/* Write P1 & P2 */
temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
- temp |= pll->config.hw_state.ebb0;
+ temp |= pll->state.hw_state.ebb0;
I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
- temp |= pll->config.hw_state.pll0;
+ temp |= pll->state.hw_state.pll0;
I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
- temp |= pll->config.hw_state.pll1;
+ temp |= pll->state.hw_state.pll1;
I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
- temp |= pll->config.hw_state.pll2;
+ temp |= pll->state.hw_state.pll2;
I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
- temp |= pll->config.hw_state.pll3;
+ temp |= pll->state.hw_state.pll3;
I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
@@ -1420,24 +1529,24 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
- temp |= pll->config.hw_state.pll6;
+ temp |= pll->state.hw_state.pll6;
I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
- temp |= pll->config.hw_state.pll8;
+ temp |= pll->state.hw_state.pll8;
I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= pll->config.hw_state.pll9;
+ temp |= pll->state.hw_state.pll9;
I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
- temp |= pll->config.hw_state.pll10;
+ temp |= pll->state.hw_state.pll10;
I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
@@ -1445,7 +1554,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp |= PORT_PLL_RECALIBRATE;
I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- temp |= pll->config.hw_state.ebb4;
+ temp |= pll->state.hw_state.ebb4;
I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
@@ -1458,6 +1567,12 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
200))
DRM_ERROR("PLL %d not locked\n", port);
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp |= DCC_DELAY_RANGE_2;
+ I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+ }
+
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
@@ -1465,7 +1580,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
- temp |= pll->config.hw_state.pcsdw12;
+ temp |= pll->state.hw_state.pcsdw12;
I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
@@ -1479,6 +1594,16 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
temp &= ~PORT_PLL_ENABLE;
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp &= ~PORT_PLL_POWER_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+ if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ DRM_ERROR("Power state not reset for PLL:%d\n", port);
+ }
}
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -1491,7 +1616,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy;
enum dpio_channel ch;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
@@ -1759,6 +1884,25 @@ bxt_get_dpll(struct intel_crtc *crtc,
return pll;
}
+static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0,
+ hw_state->ebb4,
+ hw_state->pll0,
+ hw_state->pll1,
+ hw_state->pll2,
+ hw_state->pll3,
+ hw_state->pll6,
+ hw_state->pll8,
+ hw_state->pll9,
+ hw_state->pll10,
+ hw_state->pcsdw12);
+}
+
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
@@ -1799,6 +1943,9 @@ struct intel_dpll_mgr {
struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
+
+ void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state);
};
static const struct dpll_info pch_plls[] = {
@@ -1810,6 +1957,7 @@ static const struct dpll_info pch_plls[] = {
static const struct intel_dpll_mgr pch_pll_mgr = {
.dpll_info = pch_plls,
.get_dpll = ibx_get_dpll,
+ .dump_hw_state = ibx_dump_hw_state,
};
static const struct dpll_info hsw_plls[] = {
@@ -1825,6 +1973,7 @@ static const struct dpll_info hsw_plls[] = {
static const struct intel_dpll_mgr hsw_pll_mgr = {
.dpll_info = hsw_plls,
.get_dpll = hsw_get_dpll,
+ .dump_hw_state = hsw_dump_hw_state,
};
static const struct dpll_info skl_plls[] = {
@@ -1838,6 +1987,7 @@ static const struct dpll_info skl_plls[] = {
static const struct intel_dpll_mgr skl_pll_mgr = {
.dpll_info = skl_plls,
.get_dpll = skl_get_dpll,
+ .dump_hw_state = skl_dump_hw_state,
};
static const struct dpll_info bxt_plls[] = {
@@ -1850,8 +2000,15 @@ static const struct dpll_info bxt_plls[] = {
static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
.get_dpll = bxt_get_dpll,
+ .dump_hw_state = bxt_dump_hw_state,
};
+/**
+ * intel_shared_dpll_init - Initialize shared DPLLs
+ * @dev: drm device
+ *
+ * Initialize shared DPLLs for @dev.
+ */
void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1861,7 +2018,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dpll_mgr = &skl_pll_mgr;
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
dpll_mgr = &bxt_pll_mgr;
else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
@@ -1895,6 +2052,21 @@ void intel_shared_dpll_init(struct drm_device *dev)
intel_ddi_pll_init(dev);
}
+/**
+ * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
+ * @crtc: CRTC
+ * @crtc_state: atomic state for @crtc
+ * @encoder: encoder
+ *
+ * Find an appropriate DPLL for the given CRTC and encoder combination. A
+ * reference from the @crtc to the returned pll is registered in the atomic
+ * state. That configuration is made effective by calling
+ * intel_shared_dpll_swap_state(). The reference should be released by calling
+ * intel_release_shared_dpll().
+ *
+ * Returns:
+ * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
+ */
struct intel_shared_dpll *
intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
@@ -1908,3 +2080,48 @@ intel_get_shared_dpll(struct intel_crtc *crtc,
return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
}
+
+/**
+ * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
+ * @dpll: dpll in use by @crtc
+ * @crtc: crtc
+ * @state: atomic state
+ *
+ * This function releases the reference from @crtc to @dpll from the
+ * atomic @state. The new configuration is made effective by calling
+ * intel_shared_dpll_swap_state().
+ */
+void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
+ struct intel_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct intel_shared_dpll_state *shared_dpll_state;
+
+ shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
+ shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
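[Editorial sketch, not part of the patch: the intended lifecycle of the two calls implemented above, together with intel_shared_dpll_swap_state(). Assumes an atomic check/commit flow; error handling elided.

	pll = intel_get_shared_dpll(crtc, crtc_state, encoder);
	if (!pll)
		return -EINVAL; /* no suitable shared DPLL available */
	/* ... at commit time the pending per-PLL state becomes current: */
	intel_shared_dpll_swap_state(state);
	/* ... and when the CRTC stops using the PLL: */
	intel_release_shared_dpll(pll, crtc, state);
]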
+/**
+ * intel_dpll_dump_hw_state - write hw_state to dmesg
+ * @dev_priv: i915 drm device
+ * @hw_state: hw state to be written to the log
+ *
+ * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
+ */
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ if (dev_priv->dpll_mgr) {
+ dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+ } else {
+ /*
+ * Fallback for platforms that don't use the shared dpll
+ * infrastructure.
+ */
+ DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
+ }
+}
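[Editorial note: a hypothetical call site for the helper above — e.g. dumping the expected state when hardware readout and software state disagree:

	intel_dpll_dump_hw_state(dev_priv, &pll->state.hw_state);

On platforms without a dpll_mgr this falls through to the i9xx/PCH fields, as the fallback branch shows.]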
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index f4385353bc11..af1497eb4f9c 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -40,32 +40,72 @@ struct intel_encoder;
struct intel_shared_dpll;
struct intel_dpll_mgr;
+/**
+ * enum intel_dpll_id - possible DPLL ids
+ *
+ * Enumeration of possible IDs for a DPLL. Real shared dpll ids must be >= 0.
+ */
enum intel_dpll_id {
- DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
- /* real shared dpll ids must be >= 0 */
+ /**
+ * @DPLL_ID_PRIVATE: non-shared dpll in use
+ */
+ DPLL_ID_PRIVATE = -1,
+
+ /**
+ * @DPLL_ID_PCH_PLL_A: DPLL A in ILK, SNB and IVB
+ */
DPLL_ID_PCH_PLL_A = 0,
+ /**
+ * @DPLL_ID_PCH_PLL_B: DPLL B in ILK, SNB and IVB
+ */
DPLL_ID_PCH_PLL_B = 1,
- /* hsw/bdw */
+
+ /**
+ * @DPLL_ID_WRPLL1: HSW and BDW WRPLL1
+ */
DPLL_ID_WRPLL1 = 0,
+ /**
+ * @DPLL_ID_WRPLL2: HSW and BDW WRPLL2
+ */
DPLL_ID_WRPLL2 = 1,
+ /**
+ * @DPLL_ID_SPLL: HSW and BDW SPLL
+ */
DPLL_ID_SPLL = 2,
+ /**
+ * @DPLL_ID_LCPLL_810: HSW and BDW 0.81 GHz LCPLL
+ */
DPLL_ID_LCPLL_810 = 3,
+ /**
+ * @DPLL_ID_LCPLL_1350: HSW and BDW 1.35 GHz LCPLL
+ */
DPLL_ID_LCPLL_1350 = 4,
+ /**
+ * @DPLL_ID_LCPLL_2700: HSW and BDW 2.7 GHz LCPLL
+ */
DPLL_ID_LCPLL_2700 = 5,
- /* skl */
+
+ /**
+ * @DPLL_ID_SKL_DPLL0: SKL and later DPLL0
+ */
DPLL_ID_SKL_DPLL0 = 0,
+ /**
+ * @DPLL_ID_SKL_DPLL1: SKL and later DPLL1
+ */
DPLL_ID_SKL_DPLL1 = 1,
+ /**
+ * @DPLL_ID_SKL_DPLL2: SKL and later DPLL2
+ */
DPLL_ID_SKL_DPLL2 = 2,
+ /**
+ * @DPLL_ID_SKL_DPLL3: SKL and later DPLL3
+ */
DPLL_ID_SKL_DPLL3 = 3,
};
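[Editorial note: the ID spaces above deliberately overlap between platform generations — DPLL_ID_PCH_PLL_A, DPLL_ID_WRPLL1 and DPLL_ID_SKL_DPLL0 are all 0. On any given platform the id doubles as the index into the dev_priv->shared_dplls array, so a lookup is simply (sketch, assuming an id valid for the running platform):

	struct intel_shared_dpll *pll = &dev_priv->shared_dplls[id];
]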
#define I915_NUM_PLLS 6
-/** Inform the state checker that the DPLL is kept enabled even if not
- * in use by any crtc.
- */
-#define INTEL_DPLL_ALWAYS_ON (1 << 0)
-
struct intel_dpll_hw_state {
/* i9xx, pch plls */
uint32_t dpll;
@@ -93,36 +133,120 @@ struct intel_dpll_hw_state {
pcsdw12;
};
-struct intel_shared_dpll_config {
- unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+/**
+ * struct intel_shared_dpll_state - hold the DPLL atomic state
+ *
+ * This structure holds an atomic state for the DPLL, that can represent
+ * either its current state (in struct &intel_shared_dpll) or a desired
+ * future state which would be applied by an atomic mode set (stored in
+ * a struct &intel_atomic_state).
+ *
+ * See also intel_get_shared_dpll() and intel_release_shared_dpll().
+ */
+struct intel_shared_dpll_state {
+ /**
+ * @crtc_mask: mask of CRTCs using this DPLL, active or not
+ */
+ unsigned crtc_mask;
+
+ /**
+ * @hw_state: hardware configuration for the DPLL stored in
+ * struct &intel_dpll_hw_state.
+ */
struct intel_dpll_hw_state hw_state;
};
+/**
+ * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs
+ */
struct intel_shared_dpll_funcs {
- /* The mode_set hook is optional and should be used together with the
- * intel_prepare_shared_dpll function. */
- void (*mode_set)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
+ /**
+ * @prepare:
+ *
+ * Optional hook to perform operations prior to enabling the PLL.
+ * Called from intel_prepare_shared_dpll() unless the PLL is
+ * already enabled.
+ */
+ void (*prepare)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+
+ /**
+ * @enable:
+ *
+ * Hook for enabling the pll, called from intel_enable_shared_dpll()
+ * if the pll is not already enabled.
+ */
void (*enable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
+
+ /**
+ * @disable:
+ *
+ * Hook for disabling the pll, called from intel_disable_shared_dpll()
+ * only when it is safe to disable the pll, i.e., there are no more
+ * tracked users for it.
+ */
void (*disable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
+
+ /**
+ * @get_hw_state:
+ *
+ * Hook for reading the values currently programmed to the DPLL
+ * registers. This is used for initial hw state readout and state
+ * verification after a mode set.
+ */
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
};
+/**
+ * struct intel_shared_dpll - display PLL with tracked state and users
+ */
struct intel_shared_dpll {
- struct intel_shared_dpll_config config;
+ /**
+ * @state:
+ *
+ * Store the state for the pll, including its hw state
+ * and the CRTCs using it.
+ */
+ struct intel_shared_dpll_state state;
- unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */
- bool on; /* is the PLL actually active? Disabled during modeset */
+ /**
+ * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL
+ */
+ unsigned active_mask;
+
+ /**
+ * @on: is the PLL actually active? Disabled during modeset
+ */
+ bool on;
+
+ /**
+ * @name: DPLL name; used for logging
+ */
const char *name;
- /* should match the index in the dev_priv->shared_dplls array */
+
+ /**
+ * @id: unique identifier for this DPLL; should match the index in the
+ * dev_priv->shared_dplls array
+ */
enum intel_dpll_id id;
+ /**
+ * @funcs: platform specific hooks
+ */
struct intel_shared_dpll_funcs funcs;
+#define INTEL_DPLL_ALWAYS_ON (1 << 0)
+ /**
+ * @flags:
+ *
+ * INTEL_DPLL_ALWAYS_ON
+ * Inform the state checker that the DPLL is kept enabled even if
+ * not in use by any CRTC.
+ */
uint32_t flags;
};
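[Editorial sketch: INTEL_DPLL_ALWAYS_ON is consumed via @flags; a platform's dpll_info table entry would set it like this (hypothetical entry; assumes the dpll_info layout { name, funcs, id, flags }):

	{ "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,
	  INTEL_DPLL_ALWAYS_ON },
]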
@@ -138,14 +262,6 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
-void
-intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc);
-void
-intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
@@ -154,12 +270,18 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state,
struct intel_encoder *encoder);
+void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
+ struct intel_crtc *crtc,
+ struct drm_atomic_state *state);
void intel_prepare_shared_dpll(struct intel_crtc *crtc);
void intel_enable_shared_dpll(struct intel_crtc *crtc);
void intel_disable_shared_dpll(struct intel_crtc *crtc);
-void intel_shared_dpll_commit(struct drm_atomic_state *state);
+void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state);
+
/* BXT dpll related functions */
bool bxt_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ba2323f1b92b..b9cde116dab3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
@@ -358,7 +359,7 @@ struct intel_atomic_state {
/* SKL/KBL Only */
unsigned int cdclk_pll_vco;
- struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+ struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
/*
* Current watermarks can't be trusted during hardware readout, so
@@ -694,8 +695,9 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
- unsigned long enabled_power_domains;
bool lowfreq_avail;
+ u8 plane_ids_mask;
+ unsigned long enabled_power_domains;
struct intel_overlay *overlay;
struct intel_flip_work *flip_work;
@@ -769,7 +771,8 @@ struct intel_plane_wm_parameters {
struct intel_plane {
struct drm_plane base;
- int plane;
+ u8 plane;
+ enum plane_id id;
enum pipe pipe;
bool can_scale;
int max_downscale;
@@ -843,11 +846,13 @@ struct intel_hdmi {
enum hdmi_picture_aspect aspect_ratio;
struct intel_connector *attached_connector;
void (*write_infoframe)(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode);
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool (*infoframe_enabled)(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@@ -883,6 +888,16 @@ struct intel_dp_desc {
u8 sw_minor_rev;
} __packed;
+struct intel_dp_compliance_data {
+ unsigned long edid;
+};
+
+struct intel_dp_compliance {
+ unsigned long test_type;
+ struct intel_dp_compliance_data test_data;
+ bool test_active;
+};
+
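[Editorial note: with this consolidation, call sites migrate from the flat fields removed further down to the grouped ones — e.g. (assumed):

	intel_dp->compliance_test_active  ->  intel_dp->compliance.test_active
	intel_dp->compliance_test_type    ->  intel_dp->compliance.test_type
]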
struct intel_dp {
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
@@ -905,6 +920,10 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
+ /* Max lane count for the sink as per DPCD registers */
+ uint8_t max_sink_lane_count;
+ /* Max link BW for the sink as per DPCD registers */
+ int max_sink_link_bw;
/* sink or branch descriptor */
struct intel_dp_desc desc;
struct drm_dp_aux aux;
@@ -928,6 +947,12 @@ struct intel_dp {
*/
enum pipe pps_pipe;
/*
+ * Pipe currently driving the port. Used to prevent use of the
+ * PPS for any pipe currently driving an external DP port, as
+ * that will mess things up on VLV.
+ */
+ enum pipe active_pipe;
+ /*
* Set if the sequencer may be reset due to a power transition,
* requiring a reinitialization. Only relevant on BXT.
*/
@@ -958,9 +983,7 @@ struct intel_dp {
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
/* Displayport compliance testing */
- unsigned long compliance_test_type;
- unsigned long compliance_test_data;
- bool compliance_test_active;
+ struct intel_dp_compliance compliance;
};
struct intel_lspcon {
@@ -1093,6 +1116,12 @@ dp_to_dig_port(struct intel_dp *intel_dp)
return container_of(intel_dp, struct intel_digital_port, dp);
}
+static inline struct intel_lspcon *
+dp_to_lspcon(struct intel_dp *intel_dp)
+{
+ return &dp_to_dig_port(intel_dp)->lspcon;
+}
+
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -1145,7 +1174,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
-void intel_crt_init(struct drm_device *dev);
+void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
@@ -1156,7 +1185,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state);
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
-void intel_ddi_init(struct drm_device *dev, enum port port);
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
@@ -1169,6 +1198,8 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc,
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
struct intel_encoder *
@@ -1215,7 +1246,7 @@ unsigned int intel_fb_xy_to_linear(int x, int y,
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-bool intel_has_pending_fb_unpin(struct drm_device *dev);
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_i915_private *dev_priv);
void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
@@ -1386,12 +1417,15 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
-bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
+ enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count,
bool link_mst);
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, uint8_t lane_count);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
@@ -1453,6 +1487,10 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool __intel_dp_read_desc(struct intel_dp *intel_dp,
struct intel_dp_desc *desc);
bool intel_dp_read_desc(struct intel_dp *intel_dp);
+int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port);
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1461,13 +1499,13 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
-void intel_dsi_init(struct drm_device *dev);
+void intel_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dvo.c */
-void intel_dvo_init(struct drm_device *dev);
+void intel_dvo_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
@@ -1531,7 +1569,8 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
-void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
+void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
+ enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
@@ -1542,7 +1581,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
/* intel_lvds.c */
-void intel_lvds_init(struct drm_device *dev);
+void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1587,9 +1626,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
-enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
extern struct drm_display_mode *intel_find_panel_downclock(
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
@@ -1615,7 +1654,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
-void intel_psr_init(struct drm_device *dev);
+void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
@@ -1719,7 +1758,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_device *dev);
+void intel_pm_setup(struct drm_i915_private *dev_priv);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
@@ -1760,7 +1799,7 @@ static inline int intel_enable_rc6(void)
}
/* intel_sdvo.c */
-bool intel_sdvo_init(struct drm_device *dev,
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t reg, enum port port);
@@ -1775,7 +1814,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc);
void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
/* intel_tv.c */
-void intel_tv_init(struct drm_device *dev);
+void intel_tv_init(struct drm_i915_private *dev_priv);
/* intel_atomic.c */
int intel_connector_atomic_get_property(struct drm_connector *connector,
@@ -1787,8 +1826,6 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_clear(struct drm_atomic_state *);
-struct intel_shared_dpll_config *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
static inline struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
@@ -1802,6 +1839,20 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
+static inline struct intel_crtc_state *
+intel_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, &crtc->base);
+
+ if (crtc_state)
+ return to_intel_crtc_state(crtc_state);
+ else
+ return NULL;
+}
+
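[Editorial sketch: unlike intel_atomic_get_crtc_state(), this _existing_ variant never allocates or takes new CRTC locks; callers use it where the CRTC may not be part of the atomic state, e.g. (illustrative):

	crtc_state = intel_atomic_get_existing_crtc_state(state, crtc);
	if (!crtc_state)
		return; /* CRTC not part of this atomic state */
]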
static inline struct intel_plane_state *
intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct intel_plane *plane)
@@ -1823,6 +1874,8 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *intel_state);
/* intel_color.c */
void intel_color_init(struct drm_crtc *crtc);
@@ -1833,4 +1886,16 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
/* intel_lspcon.c */
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+
+/* intel_pipe_crc.c */
+int intel_pipe_crc_create(struct drm_minor *minor);
+void intel_pipe_crc_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt);
+#else
+#define intel_crtc_set_crc_source NULL
+#endif
+extern const struct file_operations i915_display_crc_ctl_fops;
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5b72c50d6f76..16732e7bc08e 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -340,7 +340,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
/* Dual link goes to DSI transcoder A. */
if (intel_dsi->ports == BIT(PORT_C))
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
@@ -379,7 +379,8 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
- usleep_range(2, 3);
+ /* at least 2us; a relaxed range lets the hrtimer subsystem coalesce wakeups */
+ usleep_range(10, 50);
/* 3. Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
@@ -441,7 +442,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_dsi_device_ready(encoder);
}
@@ -464,7 +465,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -476,7 +477,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
- temp |= intel_crtc->pipe ?
+ if (IS_BROXTON(dev_priv))
+ temp |= LANE_CONFIGURATION_DUAL_LINK_A;
+ else
+ temp |= intel_crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
@@ -494,7 +498,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -663,7 +667,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
@@ -695,8 +699,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
-
- intel_disable_dsi_pll(encoder);
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
@@ -712,6 +714,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
intel_dsi_clear_device_ready(encoder);
+ intel_disable_dsi_pll(encoder);
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
@@ -755,12 +759,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+ if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
+ i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
@@ -785,7 +789,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
continue;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
u32 tmp = I915_READ(MIPI_CTRL(port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -973,7 +977,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk;
DRM_DEBUG_KMS("\n");
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@ -1065,7 +1069,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1152,7 +1156,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
@@ -1190,7 +1194,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi))
tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1241,7 +1245,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
+ if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
@@ -1424,15 +1428,15 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
}
}
-void intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *scan, *fixed_mode = NULL;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum port port;
unsigned int i;
@@ -1444,7 +1448,7 @@ void intel_dsi_init(struct drm_device *dev)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1485,7 +1489,7 @@ void intel_dsi_init(struct drm_device *dev)
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
else if (port == PORT_A)
intel_encoder->crtc_mask = BIT(PIPE_A);
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 47cd1b20fb3e..8f683b8b1816 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -29,6 +29,7 @@
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <drm/drm_panel.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
#include <asm/intel-mid.h>
@@ -305,19 +306,44 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->sb_lock);
}
+static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ /* XXX: this table is a quick ugly hack. */
+ static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
+ struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
+
+ if (!gpio_desc) {
+ gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
+ "panel", gpio_index,
+ value ? GPIOD_OUT_LOW :
+ GPIOD_OUT_HIGH);
+
+ if (IS_ERR_OR_NULL(gpio_desc)) {
+ DRM_ERROR("GPIO index %u request failed (%ld)\n",
+ gpio_index, PTR_ERR(gpio_desc));
+ return;
+ }
+
+ bxt_gpio_table[gpio_index] = gpio_desc;
+ }
+
+ gpiod_set_value(gpio_desc, value);
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- u8 gpio_source, gpio_index;
+ u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
DRM_DEBUG_KMS("\n");
if (dev_priv->vbt.dsi.seq_version >= 3)
- data++;
+ gpio_index = *data++;
- gpio_index = *data++;
+ gpio_number = *data++;
/* gpio source in sequence v2 only */
if (dev_priv->vbt.dsi.seq_version == 2)
@@ -329,11 +355,11 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
value = *data++ & 1;
if (IS_VALLEYVIEW(dev_priv))
- vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
- chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else
- DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
+ bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value);
return data;
}
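[Editorial note: the byte layout consumed by mipi_exec_gpio() above, as this patch assumes it, is sequence-version dependent:

	v1:  [gpio_number][flags]
	v2:  [gpio_number][gpio_source][flags]
	v3+: [gpio_index][gpio_number][flags]

so on v3 the new gpio_index drives the gpiod lookup in bxt_exec_gpio(), while VLV/CHV keep addressing pads by gpio_number.]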
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 56eff6004bc0..61440e5c2563 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -156,8 +156,10 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
- /* wait at least 0.5 us after ungating before enabling VCO */
- usleep_range(1, 10);
+ /* wait at least 0.5 us after ungating before enabling VCO;
+ * the relaxed range lets the hrtimer subsystem optimize wakeups
+ */
+ usleep_range(10, 50);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
@@ -351,7 +353,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
- if (IS_BROXTON(to_i915(encoder->base.dev)))
+ if (IS_GEN9_LP(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@@ -504,7 +506,7 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return bxt_dsi_pll_is_enabled(dev_priv);
MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -519,7 +521,7 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
@@ -532,7 +534,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}
@@ -542,7 +544,7 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_disable_dsi_pll(encoder);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_disable_dsi_pll(encoder);
}
@@ -566,7 +568,7 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 708645443046..50da89dcb92b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -422,9 +422,8 @@ static enum port intel_dvo_port(i915_reg_t dvo_reg)
return PORT_C;
}
-void intel_dvo_init(struct drm_device *dev)
+void intel_dvo_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
@@ -511,7 +510,7 @@ void intel_dvo_init(struct drm_device *dev)
continue;
port = intel_dvo_port(dvo->dvo_reg);
- drm_encoder_init(dev, &intel_encoder->base,
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type,
"DVO %c", port_name(port));
@@ -523,14 +522,14 @@ void intel_dvo_init(struct drm_device *dev)
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
(1 << INTEL_OUTPUT_DVO);
- drm_connector_init(dev, connector,
+ drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
intel_encoder->cloneable = 0;
- drm_connector_init(dev, connector,
+ drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
encoder_type = DRM_MODE_ENCODER_LVDS;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 3da4d466e332..371acf109e34 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -111,13 +111,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/**
* intel_engines_init() - allocate, populate and init the Engine Command Streamers
- * @dev: DRM device.
+ * @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_device *dev)
+int intel_engines_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
@@ -257,7 +256,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
WARN_ON(engine->scratch);
- obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+ obj = i915_gem_object_create_stolen(engine->i915, size);
if (!obj)
obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj)) {
@@ -265,7 +264,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj);
}
- vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
@@ -305,15 +304,30 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
{
int ret;
- ret = intel_engine_init_breadcrumbs(engine);
+ /* We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ret = engine->context_pin(engine, engine->i915->kernel_context);
if (ret)
return ret;
+ ret = intel_engine_init_breadcrumbs(engine);
+ if (ret)
+ goto err_unpin;
+
ret = i915_gem_render_state_init(engine);
if (ret)
- return ret;
+ goto err_unpin;
return 0;
+
+err_unpin:
+ engine->context_unpin(engine, engine->i915->kernel_context);
+ return ret;
}
/**
@@ -331,6 +345,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+
+ engine->context_unpin(engine, engine->i915->kernel_context);
}
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index f3a1d6a5cabe..89fe5c8464df 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -188,7 +188,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
- if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
+ if (params->fb.format->cpp[0] == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
@@ -235,7 +235,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
int threshold = dev_priv->fbc.threshold;
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
- if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
+ if (params->fb.format->cpp[0] == 2)
threshold++;
switch (threshold) {
@@ -305,7 +305,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
- if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
+ if (params->fb.format->cpp[0] == 2)
threshold++;
switch (threshold) {
@@ -541,7 +541,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
end = ggtt->stolen_size - 8 * 1024 * 1024;
else
- end = ggtt->stolen_usable_size;
+ end = U64_MAX;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
@@ -584,7 +584,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
- fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);
+ fb_cpp = fbc->state_cache.fb.format->cpp[0];
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
@@ -754,7 +754,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
if (!cache->plane.visible)
return;
- cache->fb.pixel_format = fb->pixel_format;
+ cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
cache->vma = plane_state->vma;
@@ -812,7 +812,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
+ if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
fbc->no_fbc_reason = "pixel format is invalid";
return false;
}
@@ -883,7 +883,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->crtc.plane = crtc->plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
- params->fb.pixel_format = cache->fb.pixel_format;
+ params->fb.format = cache->fb.format;
params->fb.stride = cache->fb.stride;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
@@ -1284,7 +1284,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc)
if (intel_crtc_active(crtc) &&
- to_intel_plane_state(crtc->base.primary->state)->base.visible)
+ crtc->base.primary->state->visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
@@ -1305,7 +1305,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f4a8c4fc57c4..1b8ba2e77539 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -145,9 +145,9 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* important and we should probably use that space with FBC or other
* features. */
if (size * 2 < ggtt->stolen_usable_size)
- obj = i915_gem_object_create_stolen(dev, size);
+ obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
- obj = i915_gem_object_create(dev, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = PTR_ERR(obj);
@@ -261,7 +261,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* If the object is shmemfs backed, it will have given us zeroed pages.
@@ -447,7 +447,7 @@ retry:
connector->name);
/* go for command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
+ modes[i] = drm_pick_cmdline_mode(fb_conn);
/* try for preferred next */
if (!modes[i]) {
@@ -621,7 +621,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* rather than the current pipe's, since they differ.
*/
cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
- cur_size = cur_size * fb->base.bits_per_pixel / 8;
+ cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
@@ -632,14 +632,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
- fb->base.pixel_format,
+ fb->base.format->format,
fb->base.modifier);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
- fb->base.bits_per_pixel,
+ fb->base.format->cpp[0] * 8,
cur_size);
if (cur_size > max_size) {
@@ -660,7 +660,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
goto out;
}
- ifbdev->preferred_bpp = fb->base.bits_per_pixel;
+ ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
ifbdev->fb = fb;
drm_framebuffer_reference(&ifbdev->fb->base);
@@ -713,8 +713,7 @@ int intel_fbdev_init(struct drm_device *dev)
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
- ret = drm_fb_helper_init(dev, &ifbdev->helper,
- INTEL_INFO(dev_priv)->num_pipes, 4);
+ ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
if (ret) {
kfree(ifbdev);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 324ea902558b..25691f0e4c50 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -23,15 +23,6 @@
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
-/*
- * This file is partially autogenerated, although currently with some manual
- * fixups afterwards. In future, it should be entirely autogenerated, in order
- * to ensure that the definitions herein remain in sync with those used by the
- * GuC's own firmware.
- *
- * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
- */
-
#define GFXCORE_FAMILY_GEN9 12
#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
@@ -154,7 +145,7 @@
* The GuC firmware layout looks like this:
*
* +-------------------------------+
- * | guc_css_header |
+ * | uc_css_header |
* | |
* | contains major/minor version |
* +-------------------------------+
@@ -181,9 +172,16 @@
* 3. Length info of each component can be found in header, in dwords.
* 4. Modulus and exponent key are not required by driver. They may not appear
* in fw. So driver will load a truncated firmware in this case.
+ *
+ * The HuC firmware layout is the same as the GuC firmware layout.
+ *
+ * The HuC firmware css header differs only in where the version information
+ * is saved. The uc_css_header is unified to support both: the driver should
+ * read the HuC version from uc_css_header.huc_sw_version and the GuC version
+ * from uc_css_header.guc_sw_version.
*/
-struct guc_css_header {
+struct uc_css_header {
uint32_t module_type;
/* header_size includes all non-uCode bits, including css_header, rsa
* key, modulus key and exponent data. */
@@ -214,8 +212,16 @@ struct guc_css_header {
char username[8];
char buildnumber[12];
- uint32_t device_id;
- uint32_t guc_sw_version;
+ union {
+ struct {
+ uint32_t branch_client_version;
+ uint32_t sw_version;
+ } guc;
+ struct {
+ uint32_t sw_version;
+ uint32_t reserved;
+ } huc;
+ };
uint32_t prod_preprod_fw;
uint32_t reserved[12];
uint32_t header_info;
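[Editorial sketch of reading the unified header, per the comment above (hypothetical snippet; only the field names come from this patch):

	struct uc_css_header *css = (struct uc_css_header *)fw_data;
	u32 version = for_huc ? css->huc.sw_version :
				css->guc.sw_version;
]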
@@ -489,18 +495,19 @@ union guc_log_control {
} __packed;
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
-enum host2guc_action {
- HOST2GUC_ACTION_DEFAULT = 0x0,
- HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
- HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
- HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
- HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
- HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
- HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
- HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
- HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
- HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
- HOST2GUC_ACTION_LIMIT
+enum intel_guc_action {
+ INTEL_GUC_ACTION_DEFAULT = 0x0,
+ INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+ INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+ INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
+ INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
+ INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
+ INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+ INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
+ INTEL_GUC_ACTION_LIMIT
};
/*
@@ -509,22 +516,22 @@ enum host2guc_action {
* by the fact that all the MASK bits are set. The remaining bits
* give more detail.
*/
-#define GUC2HOST_RESPONSE_MASK ((u32)0xF0000000)
-#define GUC2HOST_IS_RESPONSE(x) ((u32)(x) >= GUC2HOST_RESPONSE_MASK)
-#define GUC2HOST_STATUS(x) (GUC2HOST_RESPONSE_MASK | (x))
+#define INTEL_GUC_RECV_MASK ((u32)0xF0000000)
+#define INTEL_GUC_RECV_IS_RESPONSE(x) ((u32)(x) >= INTEL_GUC_RECV_MASK)
+#define INTEL_GUC_RECV_STATUS(x) (INTEL_GUC_RECV_MASK | (x))
/* GUC will return status back to SOFT_SCRATCH_O_REG */
-enum guc2host_status {
- GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
- GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
- GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
- GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+enum intel_guc_status {
+ INTEL_GUC_STATUS_SUCCESS = INTEL_GUC_RECV_STATUS(0x0),
+ INTEL_GUC_STATUS_ALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x10),
+ INTEL_GUC_STATUS_DEALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x20),
+ INTEL_GUC_STATUS_GENERIC_FAIL = INTEL_GUC_RECV_STATUS(0x0000F000)
};
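[Editorial sketch: how a caller would interpret a value read back from the GuC scratch register using the definitions above (illustrative only; polling and register access assumed):

	u32 status = I915_READ(SOFT_SCRATCH(0));
	if (!INTEL_GUC_RECV_IS_RESPONSE(status))
		; /* GuC still processing, keep waiting */
	else if (status != INTEL_GUC_STATUS_SUCCESS)
		; /* failed; the low bits carry the detail code */
]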
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
-enum guc2host_message {
- GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
- GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
+enum intel_guc_recv_message {
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
+ INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3)
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 34d6ad2cf7c1..2f1cf9aea04e 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -28,7 +28,7 @@
*/
#include <linux/firmware.h>
#include "i915_drv.h"
-#include "intel_guc.h"
+#include "intel_uc.h"
/**
* DOC: GuC-specific firmware loader
@@ -51,12 +51,6 @@
* 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
* used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
*
- * Firmware log:
- * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
- * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
- * i915_guc_load_status will print out firmware loading status and scratch
- * registers value.
- *
*/
#define SKL_FW_MAJOR 6
@@ -81,16 +75,16 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
/* User-friendly representation of an enum */
-const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
switch (status) {
- case GUC_FIRMWARE_FAIL:
+ case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
- case GUC_FIRMWARE_NONE:
+ case INTEL_UC_FIRMWARE_NONE:
return "NONE";
- case GUC_FIRMWARE_PENDING:
+ case INTEL_UC_FIRMWARE_PENDING:
return "PENDING";
- case GUC_FIRMWARE_SUCCESS:
+ case INTEL_UC_FIRMWARE_SUCCESS:
return "SUCCESS";
default:
return "UNKNOWN!";
@@ -220,14 +214,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
if (guc->ads_vma) {
- u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+ u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
}
/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
- u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
+ u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
pgs >>= PAGE_SHIFT;
@@ -278,7 +272,7 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
struct i915_vma *vma)
{
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
unsigned long offset;
struct sg_table *sg = vma->pages;
u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
@@ -297,7 +291,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
- offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
+ offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@@ -334,12 +328,12 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
return ret;
}
-static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
{
u32 wopcm_size = GUC_WOPCM_TOP;
/* On BXT, the top of WOPCM is reserved for RC6 context */
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
return wopcm_size;
@@ -350,29 +344,27 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
*/
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
struct i915_vma *vma;
int ret;
- ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false);
if (ret) {
DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
return ret;
}
- vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
- /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
+ I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
/* Enable MIA caching. GuC clock gating is disabled. */
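The recurring switch from i915_ggtt_offset() to guc_ggtt_offset() in these hunks, together with the PIN_OFFSET_BIAS | GUC_WOPCM_TOP pin bias above, reflects that the GuC can only use GGTT addresses above the WOPCM region. A minimal sketch of what such a helper is assumed to look like (the actual definition lives elsewhere in this series):

    static inline u32 guc_ggtt_offset(struct i915_vma *vma)
    {
            u32 offset = i915_ggtt_offset(vma);

            /* Objects the GuC dereferences must sit above the WOPCM top */
            GEM_BUG_ON(offset < GUC_WOPCM_TOP);
            return offset;
    }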
@@ -388,7 +380,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
@@ -437,7 +429,7 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv)
/**
* intel_guc_setup() - finish preparing the GuC for activity
- * @dev: drm device
+ * @dev_priv: i915 device private
*
* Called from gem_init_hw() during driver loading and also after a GPU reset.
*
@@ -448,17 +440,16 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv)
*
* Return: non-zero code on error
*/
-int intel_guc_setup(struct drm_device *dev)
+int intel_guc_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- const char *fw_path = guc_fw->guc_fw_path;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ const char *fw_path = guc_fw->path;
int retries, ret, err;
DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
fw_path,
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ intel_uc_fw_status_repr(guc_fw->fetch_status),
+ intel_uc_fw_status_repr(guc_fw->load_status));
/* Loading forbidden, or no firmware to load? */
if (!i915.enable_guc_loading) {
@@ -476,10 +467,10 @@ int intel_guc_setup(struct drm_device *dev)
}
/* Fetch failed, or already fetched but failed to load? */
- if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
+ if (guc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) {
err = -EIO;
goto fail;
- } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+ } else if (guc_fw->load_status == INTEL_UC_FIRMWARE_FAIL) {
err = -ENOEXEC;
goto fail;
}
@@ -487,11 +478,14 @@ int intel_guc_setup(struct drm_device *dev)
guc_interrupts_release(dev_priv);
gen9_reset_guc_interrupts(dev_priv);
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+ /* We need to notify the GuC whenever we change the GGTT */
+ i915_ggtt_enable_guc(dev_priv);
+
+ guc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ intel_uc_fw_status_repr(guc_fw->fetch_status),
+ intel_uc_fw_status_repr(guc_fw->load_status));
err = i915_guc_submission_init(dev_priv);
if (err)
@@ -512,6 +506,7 @@ int intel_guc_setup(struct drm_device *dev)
if (err)
goto fail;
+ intel_huc_load(dev_priv);
err = guc_ucode_xfer(dev_priv);
if (!err)
break;
@@ -523,11 +518,13 @@ int intel_guc_setup(struct drm_device *dev)
"retry %d more time(s)\n", err, retries);
}
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
+ guc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ intel_uc_fw_status_repr(guc_fw->fetch_status),
+ intel_uc_fw_status_repr(guc_fw->load_status));
+
+ intel_guc_auth_huc(dev_priv);
if (i915.enable_guc_submission) {
if (i915.guc_log_level >= 0)
@@ -542,12 +539,13 @@ int intel_guc_setup(struct drm_device *dev)
return 0;
fail:
- if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
+ if (guc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
+ guc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
+ i915_ggtt_disable_guc(dev_priv);
/*
* We've failed to load the firmware :(
@@ -588,141 +586,156 @@ fail:
return ret;
}
-static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_gem_object *obj;
- const struct firmware *fw;
- struct guc_css_header *css;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
size_t size;
int err;
- DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+ DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
- err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
+ err = request_firmware(&fw, uc_fw->path, &pdev->dev);
if (err)
goto fail;
if (!fw)
goto fail;
- DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
- guc_fw->guc_fw_path, fw);
+ DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+ uc_fw->path, fw);
/* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct guc_css_header)) {
+ if (fw->size < sizeof(struct uc_css_header)) {
DRM_NOTE("Firmware header is missing\n");
goto fail;
}
- css = (struct guc_css_header *)fw->data;
+ css = (struct uc_css_header *)fw->data;
/* Firmware bits always start from header */
- guc_fw->header_offset = 0;
- guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+ uc_fw->header_offset = 0;
+ uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
- if (guc_fw->header_size != sizeof(struct guc_css_header)) {
+ if (uc_fw->header_size != sizeof(struct uc_css_header)) {
DRM_NOTE("CSS header definition mismatch\n");
goto fail;
}
/* then, uCode */
- guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
- guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+ uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
DRM_NOTE("RSA key size is bad\n");
goto fail;
}
- guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
- guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+ uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
/* At least, it should have header, uCode and RSA. Size of all three. */
- size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
+ size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
if (fw->size < size) {
DRM_NOTE("Missing firmware components\n");
goto fail;
}
- /* Header and uCode will be loaded to WOPCM. Size of the two. */
- size = guc_fw->header_size + guc_fw->ucode_size;
- if (size > guc_wopcm_size(to_i915(dev))) {
- DRM_NOTE("Firmware is too large to fit in WOPCM\n");
- goto fail;
- }
-
/*
* The GuC firmware image has the version number embedded at a well-known
* offset within the firmware blob; note that major / minor version are
* TWO bytes each (i.e. u16), although all pointers and offsets are defined
* in terms of bytes (u8).
*/
- guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
- guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
-
- if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
- guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
- DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
- guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
- guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+ switch (uc_fw->fw) {
+ case INTEL_UC_FW_TYPE_GUC:
+ /* Header and uCode will be loaded to WOPCM. Size of the two. */
+ size = uc_fw->header_size + uc_fw->ucode_size;
+
+ /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+ if (size > intel_guc_wopcm_size(dev_priv)) {
+ DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+ goto fail;
+ }
+ uc_fw->major_ver_found = css->guc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = css->huc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ break;
+
+ default:
+ DRM_ERROR("Unknown firmware type %d\n", uc_fw->fw);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
err = -ENOEXEC;
goto fail;
}
DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
- guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
- guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- mutex_lock(&dev->struct_mutex);
- obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
- mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
if (IS_ERR_OR_NULL(obj)) {
err = obj ? PTR_ERR(obj) : -ENOMEM;
goto fail;
}
- guc_fw->guc_fw_obj = obj;
- guc_fw->guc_fw_size = fw->size;
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
- DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
- guc_fw->guc_fw_obj);
+ DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+ uc_fw->obj);
release_firmware(fw);
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
return;
fail:
- DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
- guc_fw->guc_fw_path, err);
- DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
- err, fw, guc_fw->guc_fw_obj);
+ DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+ uc_fw->path, err);
+ DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+ err, fw, uc_fw->obj);
- mutex_lock(&dev->struct_mutex);
- obj = guc_fw->guc_fw_obj;
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ obj = uc_fw->obj;
if (obj)
i915_gem_object_put(obj);
- guc_fw->guc_fw_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
+ uc_fw->obj = NULL;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
release_firmware(fw); /* OK even if fw is NULL */
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
}
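The version check in the fetch path above encodes a simple compatibility rule: the major version found in the CSS header must match the wanted major exactly, while the minor version only needs to be at least the wanted one. A standalone restatement of that rule (the helper name is made up for illustration):

    /* Sketch of the compatibility rule applied above */
    static bool uc_fw_version_ok(u16 major_found, u16 minor_found,
                                 u16 major_wanted, u16 minor_wanted)
    {
            /* e.g. found 6.1 against wanted 6.1 is OK; found 7.0 is rejected */
            return major_found == major_wanted &&
                   minor_found >= minor_wanted;
    }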
/**
* intel_guc_init() - define parameters and fetch firmware
- * @dev: drm device
+ * @dev_priv: i915 device private
*
* Called early during driver load, but after GEM is initialised.
*
* The firmware will be transferred to the GuC's memory later,
* when intel_guc_setup() is called.
*/
-void intel_guc_init(struct drm_device *dev)
+void intel_guc_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
const char *fw_path;
if (!HAS_GUC(dev_priv)) {
@@ -740,24 +753,23 @@ void intel_guc_init(struct drm_device *dev)
fw_path = NULL;
} else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
- guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
+ guc_fw->major_ver_wanted = SKL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
- guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
+ guc_fw->major_ver_wanted = BXT_FW_MAJOR;
+ guc_fw->minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
- guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
+ guc_fw->major_ver_wanted = KBL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else {
fw_path = ""; /* unknown device */
}
- guc_fw->guc_dev = dev;
- guc_fw->guc_fw_path = fw_path;
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
+ guc_fw->path = fw_path;
+ guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+ guc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
/* Early (and silent) return if GuC loading is disabled */
if (!i915.enable_guc_loading)
@@ -767,30 +779,29 @@ void intel_guc_init(struct drm_device *dev)
if (*fw_path == '\0')
return;
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
+ guc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
- guc_fw_fetch(dev, guc_fw);
+ intel_uc_fw_fetch(dev_priv, guc_fw);
/* status must now be FAIL or SUCCESS */
}
/**
* intel_guc_fini() - clean up all allocated resources
- * @dev: drm device
+ * @dev_priv: i915 device private
*/
-void intel_guc_fini(struct drm_device *dev)
+void intel_guc_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
- if (guc_fw->guc_fw_obj)
- i915_gem_object_put(guc_fw->guc_fw_obj);
- guc_fw->guc_fw_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
+ if (guc_fw->obj)
+ i915_gem_object_put(guc_fw->obj);
+ guc_fw->obj = NULL;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
+ guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
new file mode 100644
index 000000000000..5c0f9a49da0e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include "i915_drv.h"
+
+static void guc_log_capture_logs(struct intel_guc *guc);
+
+/**
+ * DOC: GuC firmware log
+ *
+ * The firmware log is enabled by setting i915.guc_log_level to a non-negative
+ * level. Log data can be dumped by reading the debugfs file i915_guc_log_dump.
+ * Reading from i915_guc_load_status will print out the firmware loading status
+ * and scratch register values.
+ *
+ */
+
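As a concrete illustration of the interface described above, a userspace consumer could dump the log like this (a hypothetical example, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0):

    /* Hypothetical reader of the i915_guc_log_dump file named above */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/sys/kernel/debug/dri/0/i915_guc_log_dump", O_RDONLY);

            if (fd < 0)
                    return 1;
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);
            close(fd);
            return 0;
    }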
+static int guc_log_flush_complete(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static int guc_log_flush(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
+ 0
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static int guc_log_control(struct intel_guc *guc, u32 control_val)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
+ control_val
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer, relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ /* Use no-overwrite mode by default, where relay will stop accepting
+ * new data if there are no empty sub buffers left.
+ * There is no strict synchronization enforced by relay between consumer
+ * and producer. In overwrite mode, there is a possibility of getting
+ * inconsistent/garbled data, as the producer could be writing to the
+ * same sub buffer from which the consumer is reading. This can't be
+ * avoided unless the consumer is fast enough and can always run in
+ * tandem with the producer.
+ */
+ if (relay_buf_full(buf))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ /* This is to enable the use of a single buffer for the relay channel and
+ * correspondingly have a single file exposed to userspace, through which
+ * it can collect the logs in order without any post-processing.
+ * 'is_global' needs to be set even if parent is NULL, for early logging.
+ */
+ *is_global = 1;
+
+ if (!parent)
+ return NULL;
+
+ /* Not using the channel filename passed as an argument, since for each
+ * channel relay appends the corresponding CPU number to the filename
+ * passed in relay_open(). This should be fine as relay just needs a
+ * dentry of the file associated with the channel buffer and that file's
+ * name need not be the same as the filename passed as an argument.
+ */
+ buf_file = debugfs_create_file("guc_log", mode,
+ parent, buf, &relay_file_operations);
+ return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+/* relay channel callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = subbuf_start_callback,
+ .create_buf_file = create_buf_file_callback,
+ .remove_buf_file = remove_buf_file_callback,
+};
+
+static void guc_log_remove_relay_file(struct intel_guc *guc)
+{
+ relay_close(guc->log.relay_chan);
+}
+
+static int guc_log_create_relay_channel(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+
+ /* Keep the size of sub buffers same as shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
+
+ /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot-time logs and gives userspace enough leeway, in terms of
+ * latency, for consuming the logs from relay. It also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
+
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+ return -ENOMEM;
+ }
+
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.relay_chan = guc_log_relay_chan;
+ return 0;
+}
+
+static int guc_log_create_relay_file(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct dentry *log_dir;
+ int ret;
+
+ /* For now create the log file in /sys/kernel/debug/dri/0 dir */
+ log_dir = dev_priv->drm.primary->debugfs_root;
+
+ /* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs
+ * is not mounted and we can't create the relay file.
+ * The relay API only really fits debugfs: using relay imposes three
+ * requirements which can be met in a straightforward/clean manner only
+ * for a debugfs file:
+ * i) The associated dentry pointer of the file is needed when opening
+ * the relay channel.
+ * ii) The file must be able to use the 'relay_file_operations' fops.
+ * iii) The 'i_private' field of the file's inode is set to the pointer
+ * of the relay channel buffer.
+ */
+ if (!log_dir) {
+ DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
+ return -ENODEV;
+ }
+
+ ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
+ if (ret) {
+ DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void guc_move_to_next_buf(struct intel_guc *guc)
+{
+ /* Make sure the updates made in the sub buffer are visible to the
+ * consumer by the time it sees the following update to the offset
+ * inside the sub buffer.
+ */
+ smp_wmb();
+
+ /* All data has been written, so now move the offset of sub buffer. */
+ relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+
+ /* Switch to the next sub buffer */
+ relay_flush(guc->log.relay_chan);
+}
+
+static void *guc_get_write_buffer(struct intel_guc *guc)
+{
+ if (!guc->log.relay_chan)
+ return NULL;
+
+ /* Just get the base address of a new sub buffer and copy data into it
+ * ourselves. NULL will be returned in no-overwrite mode if all sub
+ * buffers are full. We could have used relay_write() to copy the data
+ * indirectly, but that would have been a bit convoluted, as we need to
+ * write to only certain locations inside a sub buffer, which cannot be
+ * done without using relay_reserve() along with relay_write(). So it's
+ * better to use relay_reserve() alone.
+ */
+ return relay_reserve(guc->log.relay_chan, 0);
+}
+
+static bool guc_check_log_buf_overflow(struct intel_guc *guc,
+ enum guc_log_buffer_type type,
+ unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ guc->log.prev_overflow_count[type] = full_cnt;
+ guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4-bit counter, so it wrapped */
+ guc->log.total_overflow_count[type] += 16;
+ }
+ DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+ }
+
+ return overflow;
+}
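Since buffer_full_cnt is a 4-bit field it wraps from 15 back to 0, and the += 16 above compensates so that the running total stays correct across a wrap. A standalone restatement of the arithmetic (relying, as the driver does, on unsigned wraparound):

    /* e.g. prev_full_cnt = 14, full_cnt = 2:
     * (2 - 14) underflows, then + 16 yields the true delta of 4.
     */
    static unsigned int overflow_delta(unsigned int prev_full_cnt,
                                       unsigned int full_cnt)
    {
            unsigned int delta = full_cnt - prev_full_cnt;

            if (full_cnt < prev_full_cnt)
                    delta += 16;    /* counter wrapped at 16 */
            return delta;
    }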
+
+static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ case GUC_DPC_LOG_BUFFER:
+ return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ default:
+ MISSING_CASE(type);
+ }
+
+ return 0;
+}
+
+static void guc_read_update_log_buffer(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+ struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+ struct guc_log_buffer_state log_buf_state_local;
+ enum guc_log_buffer_type type;
+ void *src_data, *dst_data;
+ bool new_overflow;
+
+ if (WARN_ON(!guc->log.buf_addr))
+ return;
+
+ /* Get the pointer to shared GuC log buffer */
+ log_buf_state = src_data = guc->log.buf_addr;
+
+ /* Get the pointer to local buffer to store the logs */
+ log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+
+ /* Actual logs are present from the 2nd page */
+ src_data += PAGE_SIZE;
+ dst_data += PAGE_SIZE;
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ /* Make a copy of the state structure, which lives inside the GuC
+ * log buffer (mapped uncached), on the stack to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state,
+ sizeof(struct guc_log_buffer_state));
+ buffer_size = guc_get_log_buffer_size(type);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_cnt = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
+ new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+
+ /* Update the state of shared log buffer */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ log_buf_state++;
+
+ if (unlikely(!log_buf_snapshot_state))
+ continue;
+
+ /* First copy the state structure in snapshot buffer */
+ memcpy(log_buf_snapshot_state, &log_buf_state_local,
+ sizeof(struct guc_log_buffer_state));
+
+ /* The write pointer could have been updated by the GuC firmware
+ * after it sent the flush interrupt to the host; for consistency,
+ * set the write pointer in the snapshot buffer to the same value
+ * as sampled_write_ptr.
+ */
+ log_buf_snapshot_state->write_ptr = write_offset;
+ log_buf_snapshot_state++;
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ DRM_ERROR("invalid log buffer state\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ /* Just copy the newly written data */
+ if (read_offset > write_offset) {
+ i915_memcpy_from_wc(dst_data, src_data, write_offset);
+ bytes_to_copy = buffer_size - read_offset;
+ } else {
+ bytes_to_copy = write_offset - read_offset;
+ }
+ i915_memcpy_from_wc(dst_data + read_offset,
+ src_data + read_offset, bytes_to_copy);
+
+ src_data += buffer_size;
+ dst_data += buffer_size;
+ }
+
+ if (log_buf_snapshot_state)
+ guc_move_to_next_buf(guc);
+ else {
+ /* Rate limited to avoid a deluge of messages, as the logs might
+ * be getting consumed by userspace at a slow rate.
+ */
+ DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ guc->log.capture_miss_count++;
+ }
+}
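The copy logic above is handling a circular buffer: when the writer has wrapped behind the reader, the new data lives in two segments, [read_offset, buffer_size) and [0, write_offset). A standalone sketch of the same two-segment copy, using plain memcpy in place of i915_memcpy_from_wc:

    #include <stddef.h>
    #include <string.h>

    static void copy_new_data(char *dst, const char *src, size_t size,
                              size_t read_offset, size_t write_offset)
    {
            if (read_offset > write_offset) {
                    /* wrapped: copy [0, write_offset) ... */
                    memcpy(dst, src, write_offset);
                    /* ... and then [read_offset, size) */
                    memcpy(dst + read_offset, src + read_offset,
                           size - read_offset);
            } else {
                    /* not wrapped: copy just [read_offset, write_offset) */
                    memcpy(dst + read_offset, src + read_offset,
                           write_offset - read_offset);
            }
    }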
+
+static void guc_log_cleanup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* First disable the flush interrupt */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ if (guc->log.flush_wq)
+ destroy_workqueue(guc->log.flush_wq);
+
+ guc->log.flush_wq = NULL;
+
+ if (guc->log.relay_chan)
+ guc_log_remove_relay_file(guc);
+
+ guc->log.relay_chan = NULL;
+
+ if (guc->log.buf_addr)
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+
+ guc->log.buf_addr = NULL;
+}
+
+static void capture_logs_work(struct work_struct *work)
+{
+ struct intel_guc *guc =
+ container_of(work, struct intel_guc, log.flush_work);
+
+ guc_log_capture_logs(guc);
+}
+
+static int guc_log_create_extras(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ void *vaddr;
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Nothing to do */
+ if (i915.guc_log_level < 0)
+ return 0;
+
+ if (!guc->log.buf_addr) {
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return ret;
+ }
+
+ guc->log.buf_addr = vaddr;
+ }
+
+ if (!guc->log.relay_chan) {
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs, the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ ret = guc_log_create_relay_channel(guc);
+ if (ret)
+ return ret;
+ }
+
+ if (!guc->log.flush_wq) {
+ INIT_WORK(&guc->log.flush_work, capture_logs_work);
+
+ /*
+ * The GuC log buffer flush work item has to do register access to
+ * send the ack to GuC, and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to bother with
+ * flushing this work item from the suspend hooks; a pending work
+ * item, if any, will either be executed before the suspend or
+ * scheduled later on resume. This way the handling of the work
+ * item can be kept the same between system suspend & rpm suspend.
+ */
+ guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (guc->log.flush_wq == NULL) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void intel_guc_log_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ unsigned long offset;
+ uint32_t size, flags;
+
+ if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+ i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+ /* The first page is used to save the log buffer state. Allocate one
+ * extra page for the others, in case of overlap. */
+ size = (1 + GUC_LOG_DPC_PAGES + 1 +
+ GUC_LOG_ISR_PAGES + 1 +
+ GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+ vma = guc->log.vma;
+ if (!vma) {
+ /* We require SSE 4.1 for fast reads from the GuC log buffer, and
+ * it should be present on the chipsets supporting GuC-based
+ * submissions.
+ */
+ if (WARN_ON(!i915_has_memcpy_from_wc())) {
+ /* logging will not be enabled */
+ i915.guc_log_level = -1;
+ return;
+ }
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma)) {
+ /* logging will be off */
+ i915.guc_log_level = -1;
+ return;
+ }
+
+ guc->log.vma = vma;
+
+ if (guc_log_create_extras(guc)) {
+ guc_log_cleanup(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+ i915.guc_log_level = -1;
+ return;
+ }
+ }
+
+ /* each allocated unit is a page */
+ flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+ (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+ (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+ (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+ offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+static int guc_log_late_setup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (i915.guc_log_level < 0)
+ return -EINVAL;
+
+ /* If log_level was set to -1 at boot time, then the setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_create_extras(guc);
+ if (ret)
+ goto err;
+
+ ret = guc_log_create_relay_file(guc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ guc_log_cleanup(guc);
+ /* logging will remain off */
+ i915.guc_log_level = -1;
+ return ret;
+}
+
+static void guc_log_capture_logs(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ guc_read_update_log_buffer(guc);
+
+ /* Generally the device is expected to be active at this
+ * point, so the get/put should be really quick.
+ */
+ intel_runtime_pm_get(dev_priv);
+ guc_log_flush_complete(guc);
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void guc_flush_logs(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+ return;
+
+ /* First disable the interrupts; they will be re-enabled afterwards */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ /* Before initiating the forceful flush, wait for any pending/ongoing
+ * flush to complete; otherwise the forceful flush may not actually happen.
+ */
+ flush_work(&guc->log.flush_work);
+
+ /* Ask GuC to update the log buffer state */
+ guc_log_flush(guc);
+
+ /* GuC would have updated log buffer by now, so capture it */
+ guc_log_capture_logs(guc);
+}
+
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ union guc_log_control log_param;
+ int ret;
+
+ log_param.value = control_val;
+
+ if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
+ log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+ return -EINVAL;
+
+ /* This combination doesn't make sense & won't have any effect */
+ if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+ return 0;
+
+ ret = guc_log_control(guc, log_param.value);
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
+ return ret;
+ }
+
+ i915.guc_log_level = log_param.verbosity;
+
+ /* If log_level was set to -1 at boot time, then the relay channel file
+ * wouldn't have been created by now, and interrupts also would not have
+ * been enabled.
+ */
+ if (!dev_priv->guc.log.relay_chan) {
+ ret = guc_log_late_setup(guc);
+ if (!ret)
+ gen9_enable_guc_interrupts(dev_priv);
+ } else if (!log_param.logging_enabled) {
+ /* Once logging is disabled, GuC won't generate logs or send an
+ * interrupt. But there could be some data in the log buffer
+ * which is yet to be captured. So request GuC to update the log
+ * buffer state and then collect the leftover logs.
+ */
+ guc_flush_logs(guc);
+
+ /* As logging is disabled, update log level to reflect that */
+ i915.guc_log_level = -1;
+ } else {
+ /* In case interrupts were disabled, enable them now */
+ gen9_enable_guc_interrupts(dev_priv);
+ }
+
+ return ret;
+}
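union guc_log_control is defined in the GuC firmware interface headers elsewhere in this series; for reading the function above, its assumed shape is a 32-bit control word overlaying an enable bit and a verbosity field, so that log_param.value can be handed to the GuC action unchanged:

    /* Assumed layout, for illustration only */
    union guc_log_control {
            struct {
                    u32 logging_enabled:1;
                    u32 reserved:3;
                    u32 verbosity:4;
            };
            u32 value;
    };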
+
+void i915_guc_log_register(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_late_setup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_cleanup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 290384e86c63..d23c0fcff751 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -67,6 +67,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
+ if (intel_vgpu_active(dev_priv)) {
+ DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n");
+ goto bail;
+ }
+
if (!is_supported_device(dev_priv)) {
DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
goto bail;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 53df5b11bff4..f05971f5586f 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -236,13 +236,13 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
- return HANGCHECK_ACTIVE;
+ return ENGINE_ACTIVE_HEAD;
}
if (!subunits_stuck(engine))
- return HANGCHECK_ACTIVE;
+ return ENGINE_ACTIVE_SUBUNITS;
- return HANGCHECK_HUNG;
+ return ENGINE_DEAD;
}
static enum intel_engine_hangcheck_action
@@ -253,11 +253,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
u32 tmp;
ha = head_stuck(engine, acthd);
- if (ha != HANGCHECK_HUNG)
+ if (ha != ENGINE_DEAD)
return ha;
if (IS_GEN2(dev_priv))
- return HANGCHECK_HUNG;
+ return ENGINE_DEAD;
/* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit
@@ -270,25 +270,144 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
+ return ENGINE_WAIT_KICK;
}
if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
- return HANGCHECK_HUNG;
+ return ENGINE_DEAD;
case 1:
i915_handle_error(dev_priv, 0,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
+ return ENGINE_WAIT_KICK;
case 0:
- return HANGCHECK_WAIT;
+ return ENGINE_WAIT;
}
}
- return HANGCHECK_HUNG;
+ return ENGINE_DEAD;
+}
+
+static void hangcheck_load_sample(struct intel_engine_cs *engine,
+ struct intel_engine_hangcheck *hc)
+{
+ /* We don't strictly need an irq-barrier here, as we are not
+ * serving an interrupt request, be paranoid in case the
+ * barrier has side-effects (such as preventing a broken
+ * cacheline snoop) and so be sure that we can see the seqno
+ * advance. If the seqno should stick, due to a stale
+ * cacheline, we would erroneously declare the GPU hung.
+ */
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ hc->acthd = intel_engine_get_active_head(engine);
+ hc->seqno = intel_engine_get_seqno(engine);
+}
+
+static void hangcheck_store_sample(struct intel_engine_cs *engine,
+ const struct intel_engine_hangcheck *hc)
+{
+ engine->hangcheck.acthd = hc->acthd;
+ engine->hangcheck.seqno = hc->seqno;
+ engine->hangcheck.action = hc->action;
+ engine->hangcheck.stalled = hc->stalled;
+}
+
+static enum intel_engine_hangcheck_action
+hangcheck_get_action(struct intel_engine_cs *engine,
+ const struct intel_engine_hangcheck *hc)
+{
+ if (engine->hangcheck.seqno != hc->seqno)
+ return ENGINE_ACTIVE_SEQNO;
+
+ if (i915_seqno_passed(hc->seqno, intel_engine_last_submit(engine)))
+ return ENGINE_IDLE;
+
+ return engine_stuck(engine, hc->acthd);
+}
+
+static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
+ struct intel_engine_hangcheck *hc)
+{
+ unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
+
+ hc->action = hangcheck_get_action(engine, hc);
+
+ /* We always track progress while the engine is busy
+ * and still processing the same request, so that no
+ * single request can run indefinitely (such as a
+ * chain of batches). The only time we do not treat
+ * the engine as making progress is when it is in a
+ * legitimate wait for another engine; in that case
+ * the waiting engine is a victim and we want to be
+ * sure we catch the right culprit. Each time we do
+ * kick the ring, we also count it as progress, since
+ * the kick may get the seqno advancing again; if it
+ * does not, hangcheck will catch the hanging engine.
+ */
+
+ switch (hc->action) {
+ case ENGINE_IDLE:
+ case ENGINE_ACTIVE_SEQNO:
+ /* Clear head and subunit states on seqno movement */
+ hc->acthd = 0;
+
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+
+ /* Intentional fall through */
+ case ENGINE_WAIT_KICK:
+ case ENGINE_WAIT:
+ engine->hangcheck.action_timestamp = jiffies;
+ break;
+
+ case ENGINE_ACTIVE_HEAD:
+ case ENGINE_ACTIVE_SUBUNITS:
+ /* Seqno stuck with still active engine gets leeway,
+ * in hopes that it is just a long shader.
+ */
+ timeout = I915_SEQNO_DEAD_TIMEOUT;
+ break;
+
+ case ENGINE_DEAD:
+ break;
+
+ default:
+ MISSING_CASE(hc->action);
+ }
+
+ hc->stalled = time_after(jiffies,
+ engine->hangcheck.action_timestamp + timeout);
+}
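The rewrite replaces the old additive hangcheck score with a timestamp scheme: any state that counts as progress refreshes action_timestamp, and the engine is considered stalled once no progress has been seen for the per-state timeout. The stall test itself is just wraparound-safe jiffies arithmetic, sketched standalone here:

    /* Minimal restatement of the stall test computed above */
    static bool stalled_for(unsigned long action_timestamp,
                            unsigned long timeout)
    {
            /* time_after() handles jiffies wraparound correctly */
            return time_after(jiffies, action_timestamp + timeout);
    }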
+
+static void hangcheck_declare_hang(struct drm_i915_private *i915,
+ unsigned int hung,
+ unsigned int stuck)
+{
+ struct intel_engine_cs *engine;
+ char msg[80];
+ unsigned int tmp;
+ int len;
+
+ /* If some rings hung but others were still busy, only
+ * blame the hanging rings in the synopsis.
+ */
+ if (stuck != hung)
+ hung &= ~stuck;
+ len = scnprintf(msg, sizeof(msg),
+ "%s on ", stuck == hung ? "No progress" : "Hang");
+ for_each_engine_masked(engine, i915, hung, tmp)
+ len += scnprintf(msg + len, sizeof(msg) - len,
+ "%s, ", engine->name);
+ msg[len-2] = '\0';
+
+ return i915_handle_error(i915, hung, msg);
}
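The synopsis string is built by appending "<name>, " for each hung engine and then truncating the final separator; a runnable userspace illustration of the same pattern (engine names made up, snprintf standing in for the kernel's scnprintf):

    #include <stdio.h>

    int main(void)
    {
            const char *names[] = { "rcs", "bcs", "vcs" };
            char msg[80];
            int len, i;

            len = snprintf(msg, sizeof(msg), "%s on ", "Hang");
            for (i = 0; i < 3; i++)
                    len += snprintf(msg + len, sizeof(msg) - len,
                                    "%s, ", names[i]);
            msg[len - 2] = '\0';    /* drop the trailing ", " */
            printf("%s\n", msg);    /* prints "Hang on rcs, bcs, vcs" */
            return 0;
    }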
/*
@@ -308,10 +427,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
if (!i915.enable_hangcheck)
return;
@@ -319,6 +434,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return;
+
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
@@ -326,112 +444,26 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
- bool busy = intel_engine_has_waiter(engine);
- u64 acthd;
- u32 seqno;
- u32 submit;
+ struct intel_engine_hangcheck cur_state, *hc = &cur_state;
+ const bool busy = intel_engine_has_waiter(engine);
semaphore_clear_deadlocks(dev_priv);
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- acthd = intel_engine_get_active_head(engine);
- seqno = intel_engine_get_seqno(engine);
- submit = intel_engine_last_submit(engine);
-
- if (engine->hangcheck.seqno == seqno) {
- if (i915_seqno_passed(seqno, submit)) {
- engine->hangcheck.action = HANGCHECK_IDLE;
- } else {
- /* We always increment the hangcheck score
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, add a small increment to the
- * score so that we can catch a batch that is
- * being repeatedly kicked and so responsible
- * for stalling the machine.
- */
- engine->hangcheck.action =
- engine_stuck(engine, acthd);
-
- switch (engine->hangcheck.action) {
- case HANGCHECK_IDLE:
- case HANGCHECK_WAIT:
- break;
- case HANGCHECK_ACTIVE:
- engine->hangcheck.score += BUSY;
- break;
- case HANGCHECK_KICK:
- engine->hangcheck.score += KICK;
- break;
- case HANGCHECK_HUNG:
- engine->hangcheck.score += HUNG;
- break;
- }
- }
-
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- hung |= intel_engine_flag(engine);
- if (engine->hangcheck.action != HANGCHECK_HUNG)
- stuck |= intel_engine_flag(engine);
- }
- } else {
- engine->hangcheck.action = HANGCHECK_ACTIVE;
-
- /* Gradually reduce the count so that we catch DoS
- * attempts across multiple batches.
- */
- if (engine->hangcheck.score > 0)
- engine->hangcheck.score -= ACTIVE_DECAY;
- if (engine->hangcheck.score < 0)
- engine->hangcheck.score = 0;
-
- /* Clear head and subunit states on seqno movement */
- acthd = 0;
-
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
+ hangcheck_load_sample(engine, hc);
+ hangcheck_accumulate_sample(engine, hc);
+ hangcheck_store_sample(engine, hc);
+
+ if (engine->hangcheck.stalled) {
+ hung |= intel_engine_flag(engine);
+ if (hc->action != ENGINE_DEAD)
+ stuck |= intel_engine_flag(engine);
}
- engine->hangcheck.seqno = seqno;
- engine->hangcheck.acthd = acthd;
busy_count += busy;
}
- if (hung) {
- char msg[80];
- unsigned int tmp;
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "No progress" : "Hang");
- for_each_engine_masked(engine, dev_priv, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return i915_handle_error(dev_priv, hung, msg);
- }
+ if (hung)
+ hangcheck_declare_hang(dev_priv, hung, stuck);
/* Reset timer in case GPU hangs without another request being added */
if (busy_count)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 02d50e334ac6..ebae2bd83918 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -134,6 +134,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
static void g4x_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
@@ -188,13 +189,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -247,13 +249,14 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -304,13 +307,14 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -362,14 +366,14 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
i915_reg_t data_reg;
int i;
@@ -426,6 +430,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
* bytes by one.
*/
static void intel_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
union hdmi_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -444,14 +449,15 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
buffer[3] = 0;
len++;
- intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
+ intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
union hdmi_infoframe frame;
int ret;
@@ -462,19 +468,17 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
return;
}
- if (intel_hdmi->rgb_quant_range_selectable) {
- if (intel_crtc->config->limited_color_range)
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_LIMITED;
- else
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_FULL;
- }
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL,
+ intel_hdmi->rgb_quant_range_selectable);
- intel_write_infoframe(encoder, &frame);
+ intel_write_infoframe(encoder, crtc_state, &frame);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
union hdmi_infoframe frame;
int ret;
@@ -487,27 +491,28 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
frame.spd.sdi = HDMI_SPD_SDI_PC;
- intel_write_infoframe(encoder, &frame);
+ intel_write_infoframe(encoder, crtc_state, &frame);
}
static void
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state)
{
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
- adjusted_mode);
+ &crtc_state->base.adjusted_mode);
if (ret < 0)
return;
- intel_write_infoframe(encoder, &frame);
+ intel_write_infoframe(encoder, crtc_state, &frame);
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -561,28 +566,22 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
-static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ struct drm_connector *connector = conn_state->connector;
/*
* HDMI cloning is only supported on g4x which doesn't
* support deep color or GCP infoframes anyway so no
* need to worry about multiple HDMI sinks here.
*/
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return connector->display_info.bpc > 8;
- return false;
+ return connector->display_info.bpc > 8;
}
/*
@@ -628,15 +627,17 @@ static bool gcp_default_phase_possible(int pipe_bpp,
mode->crtc_htotal/2 % pixels_per_group == 0);
}
-static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg;
u32 val = 0;
if (HAS_DDI(dev_priv))
- reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+ reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -645,12 +646,12 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
return false;
/* Indicate color depth whenever the sink supports deep color */
- if (hdmi_sink_is_deep_color(encoder))
+ if (hdmi_sink_is_deep_color(conn_state))
val |= GCP_COLOR_INDICATION;
/* Enable default_phase whenever the display mode is suitably aligned */
- if (gcp_default_phase_possible(crtc->config->pipe_bpp,
- &crtc->config->base.adjusted_mode))
+ if (gcp_default_phase_possible(crtc_state->pipe_bpp,
+ &crtc_state->base.adjusted_mode))
val |= GCP_DEFAULT_PHASE_ENABLE;
I915_WRITE(reg, val);
@@ -660,10 +661,11 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
static void ibx_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -699,23 +701,24 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- if (intel_hdmi_set_gcp_infoframe(encoder))
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -741,24 +744,25 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- if (intel_hdmi_set_gcp_infoframe(encoder))
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -793,25 +797,25 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- if (intel_hdmi_set_gcp_infoframe(encoder))
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -826,15 +830,15 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
return;
}
- if (intel_hdmi_set_gcp_infoframe(encoder))
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -853,31 +857,32 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
adapter, enable);
}
-static void intel_hdmi_prepare(struct intel_encoder *encoder)
+static void intel_hdmi_prepare(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
u32 hdmi_val;
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
- if (crtc->config->pipe_bpp > 24)
+ if (crtc_state->pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
- if (crtc->config->has_hdmi_sink)
+ if (crtc_state->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
if (HAS_PCH_CPT(dev_priv))
@@ -980,9 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- WARN_ON(!crtc->config->has_hdmi_sink);
+ WARN_ON(!pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
@@ -1016,14 +1021,13 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
/*
@@ -1067,7 +1071,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum pipe pipe = crtc->pipe;
u32 temp;
@@ -1129,7 +1133,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1171,7 +1175,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_hdmi->set_infoframes(&encoder->base, false, NULL);
+ intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
@@ -1247,7 +1251,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
- if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
+ if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
@@ -1326,7 +1330,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
/* See CEA-861-E - 5.1 Default Encoding Parameters */
pipe_config->limited_color_range =
pipe_config->has_hdmi_sink &&
- drm_match_cea_mode(adjusted_mode) > 1;
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
pipe_config->limited_color_range =
intel_hdmi->limited_color_range;
@@ -1643,13 +1648,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- intel_hdmi_prepare(encoder);
+ intel_hdmi_prepare(encoder, pipe_config);
intel_hdmi->set_infoframes(&encoder->base,
pipe_config->has_hdmi_sink,
- adjusted_mode);
+ pipe_config, conn_state);
}
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
@@ -1660,7 +1664,6 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
vlv_phy_pre_encoder_enable(encoder);
@@ -1670,7 +1673,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
intel_hdmi->set_infoframes(&encoder->base,
pipe_config->has_hdmi_sink,
- adjusted_mode);
+ pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1681,7 +1684,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- intel_hdmi_prepare(encoder);
+ intel_hdmi_prepare(encoder, pipe_config);
vlv_phy_pre_pll_enable(encoder);
}
@@ -1690,7 +1693,7 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- intel_hdmi_prepare(encoder);
+ intel_hdmi_prepare(encoder, pipe_config);
chv_phy_pre_pll_enable(encoder);
}
@@ -1733,9 +1736,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
chv_phy_pre_encoder_enable(encoder);
@@ -1744,8 +1744,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
chv_set_phy_signal_level(encoder, 128, 102, false);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
- adjusted_mode);
+ pipe_config->has_hdmi_sink,
+ pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1810,13 +1810,13 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
switch (port) {
case PORT_B:
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ddc_pin = GMBUS_PIN_1_BXT;
else
ddc_pin = GMBUS_PIN_DPB;
break;
case PORT_C:
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ddc_pin = GMBUS_PIN_2_BXT;
else
ddc_pin = GMBUS_PIN_DPC;
@@ -1934,10 +1934,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
}
}
-void intel_hdmi_init(struct drm_device *dev,
+void intel_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@@ -1954,8 +1953,9 @@ void intel_hdmi_init(struct drm_device *dev,
intel_encoder = &intel_dig_port->base;
- drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 3d546c019de0..b62e3f8ad415 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_enable_locked(dev);
+ drm_kms_helper_poll_enable(dev);
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
@@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
if (enabled)
- drm_kms_helper_poll_enable_locked(dev);
+ drm_kms_helper_poll_enable(dev);
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
new file mode 100644
index 000000000000..c144609425f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "i915_drv.h"
+#include "intel_uc.h"
+
+/**
+ * DOC: HuC Firmware
+ *
+ * Motivation:
+ * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * Implementation:
+ * The same firmware loader is used as for the GuC. However, the actual
+ * loading into HW is deferred until GEM initialization is done.
+ *
+ * Note that HuC firmware loading must be done before GuC loading.
+ */
+
+#define BXT_HUC_FW_MAJOR 01
+#define BXT_HUC_FW_MINOR 07
+#define BXT_BLD_NUM 1398
+
+#define SKL_HUC_FW_MAJOR 01
+#define SKL_HUC_FW_MINOR 07
+#define SKL_BLD_NUM 1398
+
+#define KBL_HUC_FW_MAJOR 02
+#define KBL_HUC_FW_MINOR 00
+#define KBL_BLD_NUM 1810
+
+#define HUC_FW_PATH(platform, major, minor, bld_num) \
+ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
+ __stringify(minor) "_" __stringify(bld_num) ".bin"
+
+#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
+ SKL_HUC_FW_MINOR, SKL_BLD_NUM)
+MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
+
+#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
+ BXT_HUC_FW_MINOR, BXT_BLD_NUM)
+MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
+
+#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
+ KBL_HUC_FW_MINOR, KBL_BLD_NUM)
+MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
+
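For reference, HUC_FW_PATH pastes its arguments through __stringify(), so the
SKL request above resolves to a concrete filename:

/*
 * HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, SKL_HUC_FW_MINOR, SKL_BLD_NUM)
 *   -> "i915/" "skl" "_huc_ver" "01" "_" "07" "_" "1398" ".bin"
 *   -> "i915/skl_huc_ver01_07_1398.bin"
 *
 * MODULE_FIRMWARE() records that name so userspace tooling (e.g.
 * initramfs generators) knows to bundle the blob with the module.
 */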
+/**
+ * huc_ucode_xfer() - DMAs the firmware
+ * @dev_priv: the drm_i915_private device
+ *
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct i915_vma *vma;
+ unsigned long offset = 0;
+ u32 size;
+ int ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false);
+ if (ret) {
+ DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
+ return ret;
+ }
+
+ vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
+ return PTR_ERR(vma);
+ }
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* init WOPCM */
+ I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
+ I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE |
+ HUC_LOADING_AGENT_GUC);
+
+ /* Set the source address for the uCode */
+ offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
+ I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+ I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+ /* Hardware doesn't look at destination address for HuC. Set it to 0,
+ * but still program the correct address space.
+ */
+ I915_WRITE(DMA_ADDR_1_LOW, 0);
+ I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ size = huc_fw->header_size + huc_fw->ucode_size;
+ I915_WRITE(DMA_COPY_SIZE, size);
+
+ /* Start the DMA */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);
+
+ DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
+
+ /* Disable the bits once DMA is over */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ /*
+	 * We keep the object pages for reuse during resume, but we can unpin
+	 * the vma now that DMA has completed, so it doesn't take up space.
+ */
+ i915_vma_unpin(vma);
+
+ return ret;
+}
+
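In outline, the transfer above programs the DMA engine directly; a condensed
sketch of the register sequence performed by huc_ucode_xfer():

/*
 *   GUC_WOPCM_SIZE       <- intel_guc_wopcm_size(dev_priv)
 *   DMA_GUC_WOPCM_OFFSET <- GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC
 *   DMA_ADDR_0_LOW/HIGH  <- GGTT offset of the fetched image (source)
 *   DMA_ADDR_1_LOW/HIGH  <- 0 / DMA_ADDRESS_SPACE_WOPCM (destination)
 *   DMA_COPY_SIZE        <- header_size + ucode_size
 *   DMA_CTRL             <- _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)
 *   ... poll until START_DMA clears, then mask HUC_UKERNEL off again
 */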
+/**
+ * intel_huc_init() - initiate HuC firmware loading request
+ * @dev_priv: the drm_i915_private device
+ *
+ * Called early during driver load, but after GEM is initialised. The fetch
+ * proceeds only when the driver explicitly specifies a firmware name and
+ * version for the platform; all other cases are treated as
+ * INTEL_UC_FIRMWARE_NONE, either because the HW is not capable or the driver
+ * does not yet support it, and no error message is printed for those cases.
+ *
+ * The DMA-copying to HW is done later when intel_huc_load() is called.
+ */
+void intel_huc_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_huc *huc = &dev_priv->huc;
+ struct intel_uc_fw *huc_fw = &huc->fw;
+ const char *fw_path = NULL;
+
+ huc_fw->path = NULL;
+ huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+ huc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
+ huc_fw->fw = INTEL_UC_FW_TYPE_HUC;
+
+ if (!HAS_HUC_UCODE(dev_priv))
+ return;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ fw_path = I915_SKL_HUC_UCODE;
+ huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
+ } else if (IS_BROXTON(dev_priv)) {
+ fw_path = I915_BXT_HUC_UCODE;
+ huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
+ } else if (IS_KABYLAKE(dev_priv)) {
+ fw_path = I915_KBL_HUC_UCODE;
+ huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
+ }
+
+ huc_fw->path = fw_path;
+ huc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("HuC firmware pending, path %s\n", fw_path);
+
+ WARN(huc_fw->path == NULL, "HuC present but no fw path\n");
+
+ intel_uc_fw_fetch(dev_priv, huc_fw);
+}
+
+/**
+ * intel_huc_load() - load HuC uCode to device
+ * @dev_priv: the drm_i915_private device
+ *
+ * Called from guc_setup() during driver loading and also after a GPU reset.
+ * Note that HuC loading must be done before GuC loading.
+ *
+ * The firmware image should have already been fetched into memory by the
+ * earlier call to intel_huc_init(), so here we need only check that
+ * the fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_huc_load(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ int err;
+
+ if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_NONE)
+ return 0;
+
+ DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
+ huc_fw->path,
+ intel_uc_fw_status_repr(huc_fw->fetch_status),
+ intel_uc_fw_status_repr(huc_fw->load_status));
+
+ if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
+ huc_fw->load_status == INTEL_UC_FIRMWARE_FAIL)
+ return -ENOEXEC;
+
+ huc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+
+ switch (huc_fw->fetch_status) {
+ case INTEL_UC_FIRMWARE_FAIL:
+ /* something went wrong :( */
+ err = -EIO;
+ goto fail;
+
+ case INTEL_UC_FIRMWARE_NONE:
+ case INTEL_UC_FIRMWARE_PENDING:
+ default:
+ /* "can't happen" */
+ WARN_ONCE(1, "HuC fw %s invalid fetch_status %s [%d]\n",
+ huc_fw->path,
+ intel_uc_fw_status_repr(huc_fw->fetch_status),
+ huc_fw->fetch_status);
+ err = -ENXIO;
+ goto fail;
+
+ case INTEL_UC_FIRMWARE_SUCCESS:
+ break;
+ }
+
+ err = huc_ucode_xfer(dev_priv);
+ if (err)
+ goto fail;
+
+ huc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+
+ DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
+ huc_fw->path,
+ intel_uc_fw_status_repr(huc_fw->fetch_status),
+ intel_uc_fw_status_repr(huc_fw->load_status));
+
+ return 0;
+
+fail:
+ if (huc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
+ huc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
+
+ DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
+
+ return err;
+}
+
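Taken together, intel_huc_init() and intel_huc_load() walk the firmware
status fields through a small state machine; roughly:

/*
 *   fetch_status: NONE -> PENDING -> SUCCESS | FAIL   (intel_huc_init)
 *   load_status:  NONE -> PENDING -> SUCCESS | FAIL   (intel_huc_load)
 *
 * intel_huc_load() attempts the DMA transfer only once fetch_status is
 * SUCCESS; a failed fetch (or a load that already failed) produces an
 * error, while FIRMWARE_NONE simply returns 0 and skips the HuC.
 */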
+/**
+ * intel_huc_fini() - clean up resources allocated for HuC
+ * @dev_priv: the drm_i915_private device
+ *
+ * Cleans up by releasing the HuC firmware GEM object.
+ */
+void intel_huc_fini(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ if (huc_fw->obj)
+ i915_gem_object_put(huc_fw->obj);
+ huc_fw->obj = NULL;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
+
+/**
+ * intel_guc_auth_huc() - authenticate ucode
+ * @dev_priv: the drm_i915_private device
+ *
+ * Triggers a HuC fw authentication request to the GuC via the
+ * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface.
+ */
+void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct intel_huc *huc = &dev_priv->huc;
+ struct i915_vma *vma;
+ int ret;
+ u32 data[2];
+
+ if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return;
+
+ vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (IS_ERR(vma)) {
+ DRM_ERROR("failed to pin huc fw object %d\n",
+ (int)PTR_ERR(vma));
+ return;
+ }
+
+ /* Specify auth action and where public signature is. */
+ data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC;
+ data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
+
+ ret = intel_guc_send(guc, data, ARRAY_SIZE(data));
+ if (ret) {
+ DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+ goto out;
+ }
+
+	/* Check authentication status; it should be done by now */
+ ret = intel_wait_for_register(dev_priv,
+ HUC_STATUS2,
+ HUC_FW_VERIFIED,
+ HUC_FW_VERIFIED,
+ 50);
+
+ if (ret) {
+ DRM_ERROR("HuC: Authentication failed %d\n", ret);
+ goto out;
+ }
+
+out:
+ i915_vma_unpin(vma);
+}
+
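The action message built in intel_guc_auth_huc() is just two dwords; what
intel_guc_send() carries here:

/*
 *   data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC    (the request)
 *   data[1] = GGTT offset of the RSA signature within the pinned
 *             firmware image (fw.rsa_offset), for the GuC to verify
 *
 * After the ack, HUC_STATUS2 is polled for HUC_FW_VERIFIED to confirm
 * that authentication actually completed.
 */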
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 83f260bb4eef..bce1ba80f277 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -72,7 +72,7 @@ static const struct gmbus_pin gmbus_pins_bxt[] = {
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return &gmbus_pins_bxt[pin];
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return &gmbus_pins_skl[pin];
@@ -87,7 +87,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
@@ -111,10 +111,8 @@ to_intel_gmbus(struct i2c_adapter *i2c)
}
void
-intel_i2c_reset(struct drm_device *dev)
+intel_i2c_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(GMBUS0, 0);
I915_WRITE(GMBUS4, 0);
}
@@ -141,7 +139,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
+ if (!IS_I830(dev_priv) && !IS_I845G(dev_priv))
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -211,7 +209,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- intel_i2c_reset(&dev_priv->drm);
+ intel_i2c_reset(dev_priv);
intel_i2c_quirk_set(dev_priv, true);
set_data(bus, 1);
set_clock(bus, 1);
@@ -617,11 +615,10 @@ static const struct i2c_algorithm gmbus_algorithm = {
/**
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
- * @dev: DRM device
+ * @dev_priv: i915 device private
*/
-int intel_setup_gmbus(struct drm_device *dev)
+int intel_setup_gmbus(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_gmbus *bus;
unsigned int pin;
@@ -678,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
}
- intel_i2c_reset(&dev_priv->drm);
+ intel_i2c_reset(dev_priv);
return 0;
@@ -724,9 +721,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_unlock(&dev_priv->gmbus_mutex);
}
-void intel_teardown_gmbus(struct drm_device *dev)
+void intel_teardown_gmbus(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_gmbus *bus;
unsigned int pin;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index beabc17e7c8a..ebf8023d21e6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -230,8 +230,6 @@ enum {
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
-static int intel_lr_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
@@ -362,7 +360,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct drm_i915_gem_request *rq)
{
struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
- struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct i915_hw_ppgtt *ppgtt =
+ rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
@@ -415,7 +414,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- ctx->execlists_force_single_submission);
+ i915_gem_context_force_single_submission(ctx));
}
static bool can_merge_ctx(const struct i915_gem_context *prev,
@@ -514,15 +513,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
RB_CLEAR_NODE(&cursor->priotree.node);
cursor->priotree.priority = INT_MAX;
- /* We keep the previous context alive until we retire the
- * following request. This ensures that any the context object
- * is still pinned for any residual writes the HW makes into it
- * on the context switch into the next object following the
- * breadcrumb. Otherwise, we may retire the context too early.
- */
- cursor->previous_context = engine->last_context;
- engine->last_context = cursor->ctx;
-
__i915_gem_request_submit(cursor);
last = cursor;
submit = true;
@@ -695,7 +685,6 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
{
- static DEFINE_MUTEX(lock);
struct intel_engine_cs *engine = NULL;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
@@ -704,8 +693,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
if (prio <= READ_ONCE(request->priotree.priority))
return;
- /* Need global lock to use the temporary link inside i915_dependency */
- mutex_lock(&lock);
+ /* Need BKL in order to use the temporary link inside i915_dependency */
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
stack.signaler = &request->priotree;
list_add(&stack.dfs_link, &dfs);
@@ -734,7 +723,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
if (prio > READ_ONCE(p->signaler->priority))
list_move_tail(&p->dfs_link, &dfs);
- p = list_next_entry(dep, dfs_link);
+ list_safe_reset_next(dep, p, dfs_link);
if (!RB_EMPTY_NODE(&pt->node))
continue;
@@ -772,80 +761,14 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
if (engine)
spin_unlock_irq(&engine->timeline->lock);
- mutex_unlock(&lock);
-
/* XXX Do we need to preempt to make room for us and our deps? */
}
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
-{
- struct intel_engine_cs *engine = request->engine;
- struct intel_context *ce = &request->ctx->engine[engine->id];
- int ret;
-
- /* Flush enough space to reduce the likelihood of waiting after
- * we start building the request - in which case we will just
- * have to repeat work.
- */
- request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
- if (!ce->state) {
- ret = execlists_context_deferred_alloc(request->ctx, engine);
- if (ret)
- return ret;
- }
-
- request->ring = ce->ring;
-
- ret = intel_lr_context_pin(request->ctx, engine);
- if (ret)
- return ret;
-
- if (i915.enable_guc_submission) {
- /*
- * Check that the GuC has space for the request before
- * going any further, as the i915_add_request() call
- * later on mustn't fail ...
- */
- ret = i915_guc_wq_reserve(request);
- if (ret)
- goto err_unpin;
- }
-
- ret = intel_ring_begin(request, 0);
- if (ret)
- goto err_unreserve;
-
- if (!ce->initialised) {
- ret = engine->init_context(request);
- if (ret)
- goto err_unreserve;
-
- ce->initialised = true;
- }
-
- /* Note that after this point, we have committed to using
- * this request as it is being used to both track the
- * state of engine initialisation and liveness of the
- * golden renderstate above. Think twice before you try
- * to cancel/unwind this request now.
- */
-
- request->reserved_space -= EXECLISTS_REQUEST_SIZE;
- return 0;
-
-err_unreserve:
- if (i915.enable_guc_submission)
- i915_guc_wq_unreserve(request);
-err_unpin:
- intel_lr_context_unpin(request->ctx, engine);
- return ret;
-}
-
-static int intel_lr_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
+ unsigned int flags;
void *vaddr;
int ret;
@@ -854,8 +777,20 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
if (ce->pin_count++)
return 0;
- ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
+ if (!ce->state) {
+ ret = execlists_context_deferred_alloc(ctx, engine);
+ if (ret)
+ goto err;
+ }
+ GEM_BUG_ON(!ce->state);
+
+ flags = PIN_GLOBAL;
+ if (ctx->ggtt_offset_bias)
+ flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+ if (i915_gem_context_is_kernel(ctx))
+ flags |= PIN_HIGH;
+
+ ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
if (ret)
goto err;
@@ -865,7 +800,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
goto unpin_vma;
}
- ret = intel_ring_pin(ce->ring);
+ ret = intel_ring_pin(ce->ring, ctx->ggtt_offset_bias);
if (ret)
goto unpin_map;
@@ -877,12 +812,6 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
ce->state->obj->mm.dirty = true;
- /* Invalidate GuC TLB. */
- if (i915.enable_guc_submission) {
- struct drm_i915_private *dev_priv = ctx->i915;
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- }
-
i915_gem_context_get(ctx);
return 0;
@@ -895,8 +824,8 @@ err:
return ret;
}
-void intel_lr_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void execlists_context_unpin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
@@ -914,6 +843,63 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
i915_gem_context_put(ctx);
}
+static int execlists_request_alloc(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_context *ce = &request->ctx->engine[engine->id];
+ int ret;
+
+ GEM_BUG_ON(!ce->pin_count);
+
+ /* Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ GEM_BUG_ON(!ce->ring);
+ request->ring = ce->ring;
+
+ if (i915.enable_guc_submission) {
+ /*
+ * Check that the GuC has space for the request before
+ * going any further, as the i915_add_request() call
+ * later on mustn't fail ...
+ */
+ ret = i915_guc_wq_reserve(request);
+ if (ret)
+ goto err;
+ }
+
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ goto err_unreserve;
+
+ if (!ce->initialised) {
+ ret = engine->init_context(request);
+ if (ret)
+ goto err_unreserve;
+
+ ce->initialised = true;
+ }
+
+ /* Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+
+err_unreserve:
+ if (i915.enable_guc_submission)
+ i915_guc_wq_unreserve(request);
+err:
+ return ret;
+}
+
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
@@ -1236,11 +1222,11 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
+ obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1334,15 +1320,6 @@ out:
return ret;
}
-static void lrc_init_hws(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE(RING_HWS_PGA(engine->mmio_base),
- engine->status_page.ggtt_offset);
- POSTING_READ(RING_HWS_PGA(engine->mmio_base));
-}
-
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1352,20 +1329,19 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- lrc_init_hws(engine);
-
intel_engine_reset_breadcrumbs(engine);
+ intel_engine_init_hangcheck(engine);
I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
-
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+ engine->status_page.ggtt_offset);
+ POSTING_READ(RING_HWS_PGA(engine->mmio_base));
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
- intel_engine_init_hangcheck(engine);
-
/* After a GPU reset, we may have requests to replay */
if (!execlists_elsp_idle(engine)) {
engine->execlist_port[0].count = 0;
@@ -1414,7 +1390,20 @@ static void reset_common_ring(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct execlist_port *port = engine->execlist_port;
- struct intel_context *ce = &request->ctx->engine[engine->id];
+ struct intel_context *ce;
+
+ /* If the request was innocent, we leave the request in the ELSP
+ * and will try to replay it on restarting. The context image may
+ * have been corrupted by the reset, in which case we may have
+ * to service a new GPU hang, but more likely we can continue on
+ * without impact.
+ *
+ * If the request was guilty, we presume the context is corrupt
+ * and have to at least restore the RING register in the context
+ * image back to the expected values to skip over the guilty request.
+ */
+ if (!request || request->fence.error != -EIO)
+ return;
/* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
@@ -1423,6 +1412,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
+ ce = &request->ctx->engine[engine->id];
execlists_init_reg_state(ce->lrc_reg_state,
request->ctx, engine, ce->ring);
@@ -1784,13 +1774,12 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- intel_engine_cleanup_common(engine);
-
if (engine->status_page.vma) {
i915_gem_object_unpin_map(engine->status_page.vma->obj);
engine->status_page.vma = NULL;
}
- intel_lr_context_unpin(dev_priv->kernel_context, engine);
+
+ intel_engine_cleanup_common(engine);
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
@@ -1815,6 +1804,12 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
/* Default vfuncs which can be overriden by each engine. */
engine->init_hw = gen8_init_common_ring;
engine->reset_hw = reset_common_ring;
+
+ engine->context_pin = execlists_context_pin;
+ engine->context_unpin = execlists_context_unpin;
+
+ engine->request_alloc = execlists_request_alloc;
+
engine->emit_flush = gen8_emit_flush;
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
@@ -1897,18 +1892,6 @@ logical_ring_init(struct intel_engine_cs *engine)
if (ret)
goto error;
- ret = execlists_context_deferred_alloc(dctx, engine);
- if (ret)
- goto error;
-
- /* As this is the default context, always pin it */
- ret = intel_lr_context_pin(dctx, engine);
- if (ret) {
- DRM_ERROR("Failed to pin context for %s: %d\n",
- engine->name, ret);
- goto error;
- }
-
/* And setup the hardware status page. */
ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
if (ret) {
@@ -1943,7 +1926,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
- ret = intel_engine_create_scratch(engine, 4096);
+ ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
@@ -2119,19 +2102,12 @@ static void execlists_init_reg_state(u32 *reg_state,
ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
0);
- if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+ if (ppgtt && USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
ASSIGN_CTX_PML4(ppgtt, reg_state);
- } else {
- /* 32b PPGTT
- * PDP*_DESCRIPTOR contains the base address of space supported.
- * With dynamic page allocation, PDPs may not be allocated at
- * this point. Point the unallocated PDPs to the scratch page
- */
- execlists_update_context_pdps(ppgtt, reg_state);
}
if (engine->id == RCS) {
@@ -2225,18 +2201,19 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
WARN_ON(ce->state);
- context_size = round_up(intel_lr_context_size(engine), 4096);
+ context_size = round_up(intel_lr_context_size(engine),
+ I915_GTT_PAGE_SIZE);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
- ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
+ ctx_obj = i915_gem_object_create(ctx->i915, context_size);
if (IS_ERR(ctx_obj)) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
return PTR_ERR(ctx_obj);
}
- vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index c1f546180ba2..0c852c024227 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -26,7 +26,7 @@
#include "intel_ringbuffer.h"
-#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
/* Execlists regs */
#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
@@ -63,14 +63,12 @@ enum {
};
/* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int logical_render_ring_init(struct intel_engine_cs *engine);
int logical_xcs_ring_init(struct intel_engine_cs *engine);
-int intel_engines_init(struct drm_device *dev);
+int intel_engines_init(struct drm_i915_private *dev_priv);
/* Logical Ring Contexts */
@@ -79,13 +77,10 @@ int intel_engines_init(struct drm_device *dev);
#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
+struct drm_i915_private;
struct i915_gem_context;
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-void intel_lr_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
-
-struct drm_i915_private;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index daa523410953..c300647ef604 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -35,21 +35,59 @@ static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
return &dig_port->dp;
}
+static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
+{
+ switch (mode) {
+ case DRM_LSPCON_MODE_PCON:
+ return "PCON";
+ case DRM_LSPCON_MODE_LS:
+ return "LS";
+ case DRM_LSPCON_MODE_INVALID:
+ return "INVALID";
+ default:
+ MISSING_CASE(mode);
+ return "INVALID";
+ }
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
- enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
+ enum drm_lspcon_mode current_mode;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
- if (drm_lspcon_get_mode(adapter, &current_mode))
+ if (drm_lspcon_get_mode(adapter, &current_mode)) {
DRM_ERROR("Error reading LSPCON mode\n");
- else
- DRM_DEBUG_KMS("Current LSPCON mode %s\n",
- current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
+ return DRM_LSPCON_MODE_INVALID;
+ }
+ return current_mode;
+}
+
+static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
+ enum drm_lspcon_mode mode)
+{
+ enum drm_lspcon_mode current_mode;
+
+ current_mode = lspcon_get_current_mode(lspcon);
+ if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID)
+ goto out;
+
+ DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
+ lspcon_mode_name(mode));
+
+ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
+ current_mode == DRM_LSPCON_MODE_INVALID, 100);
+ if (current_mode != mode)
+ DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");
+
+out:
+ DRM_DEBUG_KMS("Current LSPCON mode %s\n",
+ lspcon_mode_name(current_mode));
+
return current_mode;
}
static int lspcon_change_mode(struct intel_lspcon *lspcon,
- enum drm_lspcon_mode mode, bool force)
+ enum drm_lspcon_mode mode)
{
int err;
enum drm_lspcon_mode current_mode;
@@ -77,10 +115,30 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
return 0;
}
+static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
+{
+ uint8_t rev;
+
+ if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
+ &rev) != 1) {
+ DRM_DEBUG_KMS("Native AUX CH down\n");
+ return false;
+ }
+
+ DRM_DEBUG_KMS("Native AUX CH up, DPCD version: %d.%d\n",
+ rev >> 4, rev & 0xf);
+
+ return true;
+}
+
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
enum drm_dp_dual_mode_type adaptor_type;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+ enum drm_lspcon_mode expected_mode;
+
+ expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
+ DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
/* Lets probe the adaptor and check its type */
adaptor_type = drm_dp_dual_mode_detect(adapter);
@@ -92,7 +150,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
/* Yay ... got a LSPCON device */
DRM_DEBUG_KMS("LSPCON detected\n");
- lspcon->mode = lspcon_get_current_mode(lspcon);
+ lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
lspcon->active = true;
return true;
}
@@ -100,6 +158,8 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
unsigned long start = jiffies;
if (!lspcon->desc_valid)
@@ -115,7 +175,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
if (!__intel_dp_read_desc(intel_dp, &desc))
return;
- if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+ if (intel_digital_port_connected(dev_priv, dig_port) &&
+ !memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
jiffies_to_msecs(jiffies - start));
return;
@@ -132,14 +193,29 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
void lspcon_resume(struct intel_lspcon *lspcon)
{
- lspcon_resume_in_pcon_wa(lspcon);
+ enum drm_lspcon_mode expected_mode;
+
+ if (lspcon_wake_native_aux_ch(lspcon)) {
+ expected_mode = DRM_LSPCON_MODE_PCON;
+ lspcon_resume_in_pcon_wa(lspcon);
+ } else {
+ expected_mode = DRM_LSPCON_MODE_LS;
+ }
+
+ if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON)
+ return;
- if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
DRM_ERROR("LSPCON resume failed\n");
else
DRM_DEBUG_KMS("LSPCON resume success\n");
}
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
+{
+ lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
+}
+
bool lspcon_init(struct intel_digital_port *intel_dig_port)
{
struct intel_dp *dp = &intel_dig_port->dp;
@@ -166,8 +242,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
* 2.0 sinks.
*/
if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
- if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
- true) < 0) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
DRM_ERROR("LSPCON mode change to PCON failed\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d12ef0047d49..9ca4dc4d2378 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -460,13 +460,13 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
static enum drm_connector_status
intel_lvds_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
enum drm_connector_status status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- status = intel_panel_detect(dev);
+ status = intel_panel_detect(dev_priv);
if (status != connector_status_unknown)
return status;
@@ -971,9 +971,9 @@ static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_device *dev)
+void intel_lvds_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
struct intel_lvds_connector *lvds_connector;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 80bb9247ce66..c787fc4e6eb9 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -182,7 +182,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->table = broxton_mocs_table;
result = true;
@@ -380,7 +380,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
/**
* intel_mocs_init_l3cc_table() - program the mocs control table
- * @dev: The the device to be programmed.
+ * @dev_priv: i915 device private
*
* This function simply programs the mocs registers for the given table
* starting at the given address. This register set is programmed in pairs.
@@ -392,9 +392,8 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
*
* Return: Nothing.
*/
-void intel_mocs_init_l3cc_table(struct drm_device *dev)
+void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_mocs_table table;
unsigned int i;
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index a8bd9f7bfece..ce4a5dfa5f94 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -53,7 +53,7 @@
#include "i915_drv.h"
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
-void intel_mocs_init_l3cc_table(struct drm_device *dev);
+void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
int intel_mocs_init_engine(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f4429f67a4e3..4a862a358c70 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -982,7 +982,18 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
opregion->vbt_size = vbt_size;
} else {
vbt = base + OPREGION_VBT_OFFSET;
- vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET;
+ /*
+ * The VBT specification says that if the ASLE ext
+ * mailbox is not used its area is reserved, but
+ * on some CHT boards the VBT extends into the
+ * ASLE ext area. Allow this even though it is
+ * against the spec, so we do not end up rejecting
+ * the VBT on those boards (and end up not finding the
+ * LCD panel because of this).
+ */
+ vbt_size = (mboxes & MBOX_ASLE_EXT) ?
+ OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
+ vbt_size -= OPREGION_VBT_OFFSET;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index e589e17876dc..0608fad7f593 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,6 +187,29 @@ struct intel_overlay {
struct i915_gem_active last_flip;
};
+static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ u8 val;
+
+ /* WA_OVERLAY_CLKGATE:alm */
+ if (enable)
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ else
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+
+ /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
+ pci_bus_read_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
+ if (enable)
+ val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ else
+ val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ pci_bus_write_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
+}
+
static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
@@ -262,6 +285,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = true;
+ if (IS_I830(dev_priv))
+ i830_overlay_clock_gating(dev_priv, false);
+
ring = req->ring;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -272,8 +298,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
return intel_overlay_do_wait_request(overlay, req, NULL);
}
+static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
+ struct i915_vma *vma)
+{
+ enum pipe pipe = overlay->crtc->pipe;
+
+ WARN_ON(overlay->old_vma);
+
+ i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+ vma ? vma->obj : NULL,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ intel_frontbuffer_flip_prepare(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ overlay->old_vma = overlay->vma;
+ if (vma)
+ overlay->vma = i915_vma_get(vma);
+ else
+ overlay->vma = NULL;
+}
+
/* overlay needs to be enabled in OCMD reg */
static int intel_overlay_continue(struct intel_overlay *overlay,
+ struct i915_vma *vma,
bool load_polyphase_filter)
{
struct drm_i915_private *dev_priv = overlay->i915;
@@ -308,53 +356,57 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
+ intel_overlay_flip_prepare(overlay, vma);
+
intel_overlay_submit_request(overlay, req, NULL);
return 0;
}
-static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
- struct drm_i915_gem_request *req)
+static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
if (WARN_ON(!vma))
return;
- i915_gem_track_fb(vma->obj, NULL,
- INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_gem_object_unpin_from_display_plane(vma);
i915_vma_put(vma);
}
+static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
+ struct drm_i915_gem_request *req)
+{
+ struct intel_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+
+ intel_overlay_release_old_vma(overlay);
+}
+
static void intel_overlay_off_tail(struct i915_gem_active *active,
struct drm_i915_gem_request *req)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
- struct i915_vma *vma;
-
- /* never have the overlay hw on without showing a frame */
- vma = fetch_and_zero(&overlay->vma);
- if (WARN_ON(!vma))
- return;
+ struct drm_i915_private *dev_priv = overlay->i915;
- i915_gem_object_unpin_from_display_plane(vma);
- i915_vma_put(vma);
+ intel_overlay_release_old_vma(overlay);
overlay->crtc->overlay = NULL;
overlay->crtc = NULL;
overlay->active = false;
+
+ if (IS_I830(dev_priv))
+ i830_overlay_clock_gating(dev_priv, true);
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
@@ -379,25 +431,21 @@ static int intel_overlay_off(struct intel_overlay *overlay)
}
ring = req->ring;
+
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+
/* turn overlay off */
- if (IS_I830(dev_priv)) {
- /* Workaround: Don't disable the overlay fully, since otherwise
- * it dies on the next OVERLAY_ON cmd. */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- } else {
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring,
- MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- }
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+
intel_ring_advance(ring);
+ intel_overlay_flip_prepare(overlay, NULL);
+
return intel_overlay_do_wait_request(overlay, req,
intel_overlay_off_tail);
}
@@ -542,51 +590,57 @@ static int uv_vsubsampling(u32 format)
static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
{
- u32 mask, shift, ret;
- if (IS_GEN2(dev_priv)) {
- mask = 0x1f;
- shift = 5;
- } else {
- mask = 0x3f;
- shift = 6;
- }
- ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (!IS_GEN2(dev_priv))
- ret <<= 1;
- ret -= 1;
- return ret << 2;
+ u32 sw;
+
+ if (IS_GEN2(dev_priv))
+ sw = ALIGN((offset & 31) + width, 32);
+ else
+ sw = ALIGN((offset & 63) + width, 64);
+
+ if (sw == 0)
+ return 0;
+
+ return (sw - 32) >> 3;
}
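A worked example of the rewritten helper, with values picked for
illustration:

/*
 * Non-GEN2 path (64-byte alignment): offset = 8, width = 720
 *
 *   sw = ALIGN((8 & 63) + 720, 64) = ALIGN(728, 64) = 768
 *   return (768 - 32) >> 3 = 92
 *
 * i.e. the scanline span is first widened to whole 64-byte units
 * around the source offset, then encoded for the hardware swidthsw
 * value that the callers program.
 */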
-static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
- 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
- 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
- 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
- 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
- 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
- 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
- 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
- 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
- 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
- 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
- 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
- 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
- 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
- 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
- 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
- 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
- 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = {
+ [ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, },
+ [ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, },
+ [ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, },
+ [ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, },
+ [ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, },
+ [ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, },
+ [ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, },
+ [ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, },
+ [ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, },
+ [ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, },
+ [10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, },
+ [11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, },
+ [12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, },
+ [13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, },
+ [14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, },
+ [15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, },
+ [16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, },
};
-static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
- 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
- 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
- 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
- 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
- 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
- 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
- 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
- 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
- 0x3000, 0x0800, 0x3000
+static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = {
+ [ 0] = { 0x3000, 0x1800, 0x1800, },
+ [ 1] = { 0xb000, 0x18d0, 0x2e60, },
+ [ 2] = { 0xb000, 0x1990, 0x2ce0, },
+ [ 3] = { 0xb020, 0x1a68, 0x2b40, },
+ [ 4] = { 0xb040, 0x1b20, 0x29e0, },
+ [ 5] = { 0xb060, 0x1bd8, 0x2880, },
+ [ 6] = { 0xb080, 0x1c88, 0x3e60, },
+ [ 7] = { 0xb0a0, 0x1d28, 0x3c00, },
+ [ 8] = { 0xb0c0, 0x1db8, 0x39e0, },
+ [ 9] = { 0xb0e0, 0x1e40, 0x37e0, },
+ [10] = { 0xb100, 0x1eb8, 0x3620, },
+ [11] = { 0xb100, 0x1f18, 0x34a0, },
+ [12] = { 0xb100, 0x1f68, 0x3360, },
+ [13] = { 0xb0e0, 0x1fa8, 0x3240, },
+ [14] = { 0xb0c0, 0x1fe0, 0x3140, },
+ [15] = { 0xb060, 0x1ff0, 0x30a0, },
+ [16] = { 0x3000, 0x0800, 0x3000, },
};
static void update_polyphase_filter(struct overlay_registers __iomem *regs)
@@ -659,31 +713,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
static void update_colorkey(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
+ const struct intel_plane_state *state =
+ to_intel_plane_state(overlay->crtc->base.primary->state);
u32 key = overlay->color_key;
- u32 flags;
+ u32 format = 0;
+ u32 flags = 0;
- flags = 0;
if (overlay->color_key_enabled)
flags |= DST_KEY_ENABLE;
- switch (overlay->crtc->base.primary->fb->bits_per_pixel) {
- case 8:
+ if (state->base.visible)
+ format = state->base.fb->format->format;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
key = 0;
flags |= CLK_RGB8I_MASK;
break;
-
- case 16:
- if (overlay->crtc->base.primary->fb->depth == 15) {
- key = RGB15_TO_COLORKEY(key);
- flags |= CLK_RGB15_MASK;
- } else {
- key = RGB16_TO_COLORKEY(key);
- flags |= CLK_RGB16_MASK;
- }
+ case DRM_FORMAT_XRGB1555:
+ key = RGB15_TO_COLORKEY(key);
+ flags |= CLK_RGB15_MASK;
break;
-
- case 24:
- case 32:
+ case DRM_FORMAT_RGB565:
+ key = RGB16_TO_COLORKEY(key);
+ flags |= CLK_RGB16_MASK;
+ break;
+ default:
flags |= CLK_RGB24_MASK;
break;
}
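
[Editor's note] update_colorkey() now keys off the framebuffer's fourcc rather than bits_per_pixel/depth, and the plane-not-visible case leaves format at 0 so it falls through to the 24-bit mask. For reference, a standalone sketch of what the RGB15/RGB16 key-conversion helpers amount to — the channel shifts are assumed from the 1:5:5:5 and 5:6:5 layouts; the authoritative macro definitions live in the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* assumed expansion: shift each channel up into an 8:8:8 layout,
	 * leaving the low (would-be replicated) bits zero */
	static uint32_t rgb16_to_colorkey(uint16_t c)
	{
		return ((c & 0xf800) << 8) |	/* R: bits 15:11 -> 23:19 */
		       ((c & 0x07e0) << 5) |	/* G: bits 10:5  -> 15:10 */
		       ((c & 0x001f) << 3);	/* B: bits  4:0  ->  7:3  */
	}

	static uint32_t rgb15_to_colorkey(uint16_t c)
	{
		return ((c & 0x7c00) << 9) |
		       ((c & 0x03e0) << 6) |
		       ((c & 0x001f) << 3);
	}

	int main(void)
	{
		printf("0x%06x 0x%06x\n",
		       rgb16_to_colorkey(0xf800),	/* pure red, RGB565 */
		       rgb15_to_colorkey(0x7c00));	/* pure red, XRGB1555 */
		return 0;
	}
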
@@ -756,8 +811,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
- &i915_ggtt_view_normal);
+ vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -836,18 +890,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_continue(overlay, scale_changed);
+ ret = intel_overlay_continue(overlay, vma, scale_changed);
if (ret)
goto out_unpin;
- i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
- vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
- overlay->old_vma = overlay->vma;
- overlay->vma = vma;
-
- intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
return 0;
out_unpin:
@@ -921,12 +967,13 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
static int check_overlay_dst(struct intel_overlay *overlay,
struct drm_intel_overlay_put_image *rec)
{
- struct drm_display_mode *mode = &overlay->crtc->base.mode;
+ const struct intel_crtc_state *pipe_config =
+ overlay->crtc->config;
- if (rec->dst_x < mode->hdisplay &&
- rec->dst_x + rec->dst_width <= mode->hdisplay &&
- rec->dst_y < mode->vdisplay &&
- rec->dst_y + rec->dst_height <= mode->vdisplay)
+ if (rec->dst_x < pipe_config->pipe_src_w &&
+ rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
+ rec->dst_y < pipe_config->pipe_src_h &&
+ rec->dst_y + rec->dst_height <= pipe_config->pipe_src_h)
return 0;
else
return -EINVAL;
@@ -958,7 +1005,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
u32 tmp;
/* check src dimensions */
- if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
+ if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
@@ -1010,7 +1057,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
return -EINVAL;
/* stride checking */
- if (IS_I830(dev_priv) || IS_845G(dev_priv))
+ if (IS_I830(dev_priv) || IS_I845G(dev_priv))
stride_mask = 255;
else
stride_mask = 63;
@@ -1058,33 +1105,6 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
return 0;
}
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
-{
- u32 pfit_control;
-
- /* i830 doesn't have a panel fitter */
- if (INTEL_GEN(dev_priv) <= 3 &&
- (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
- return -1;
-
- pfit_control = I915_READ(PFIT_CONTROL);
-
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
-
- /* 965 can place panel fitter on either pipe */
- if (IS_GEN4(dev_priv))
- return (pfit_control >> 29) & 0x3;
-
- /* older chips can only use pipe 1 */
- return 1;
-}
-
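
[Editor's note] With intel_panel_fitter_pipe() removed, the put_image ioctl further down no longer sniffs PFIT_CONTROL from the hardware at ioctl time; it consults the CRTC's cached atomic state instead. Conceptually the check collapses to a plain state lookup, sketched here with simplified types (the PFIT_ENABLE bit position is assumed purely for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	#define PFIT_ENABLE (1u << 31)	/* assumed bit, for illustration */

	struct pipe_cfg {
		int pipe_src_w;
		uint32_t gmch_pfit_control;
	};

	/* the one-line-mode decision after this change: pure state
	 * lookup, no PFIT_CONTROL register read at ioctl time */
	static bool needs_one_line_mode(const struct pipe_cfg *cfg)
	{
		return cfg->pipe_src_w > 1024 &&
		       (cfg->gmch_pfit_control & PFIT_ENABLE);
	}

	int main(void)
	{
		struct pipe_cfg cfg = {
			.pipe_src_w = 1920,
			.gmch_pfit_control = PFIT_ENABLE,
		};

		return needs_one_line_mode(&cfg) ? 0 : 1;
	}
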
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1146,7 +1166,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
if (overlay->crtc != crtc) {
- struct drm_display_mode *mode = &crtc->base.mode;
ret = intel_overlay_switch_off(overlay);
if (ret != 0)
goto out_unlock;
@@ -1159,8 +1178,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
crtc->overlay = overlay;
/* line too wide, i.e. one-line-mode */
- if (mode->hdisplay > 1024 &&
- intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
+ if (crtc->config->pipe_src_w > 1024 &&
+ crtc->config->gmch_pfit.control & PFIT_ENABLE) {
overlay->pfit_active = true;
update_pfit_vscale_ratio(overlay);
} else
@@ -1215,6 +1234,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
+ i915_gem_object_put(new_bo);
kfree(params);
@@ -1392,10 +1412,9 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
reg_bo = NULL;
if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
- reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
- PAGE_SIZE);
+ reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
if (reg_bo == NULL)
- reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
+ reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
if (IS_ERR(reg_bo))
goto out_free;
overlay->reg_bo = reg_bo;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 08ab6d762ca4..1a6ff26dea20 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -48,7 +48,7 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
/**
* intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
+ * @dev_priv: i915 device instance
* @fixed_mode : panel native mode
* @connector: LVDS/eDP connector
*
@@ -56,7 +56,7 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
* Find the reduced downclock for LVDS/eDP in EDID.
*/
struct drm_display_mode *
-intel_find_panel_downclock(struct drm_device *dev,
+intel_find_panel_downclock(struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector)
{
@@ -94,7 +94,7 @@ intel_find_panel_downclock(struct drm_device *dev,
}
if (temp_downclock < fixed_mode->clock)
- return drm_mode_duplicate(dev, tmp_mode);
+ return drm_mode_duplicate(&dev_priv->drm, tmp_mode);
else
return NULL;
}
@@ -375,10 +375,8 @@ out:
}
enum drm_connector_status
-intel_panel_detect(struct drm_device *dev)
+intel_panel_detect(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Assume that the BIOS does not lie through the OpRegion... */
if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
return *dev_priv->opregion.lid_state & 0x1 ?
@@ -1039,10 +1037,7 @@ static void bxt_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 pwm_ctl, val;
- /* To use 2nd set of backlight registers, utility pin has to be
- * enabled with PWM mode.
- * The field should only be changed when the utility pin is disabled
- */
+ /* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
@@ -1332,8 +1327,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int clock;
if (IS_G4X(dev_priv))
@@ -1608,19 +1602,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
- /*
- * For BXT hard coding the Backlight controller to 0.
- * TODO : Read the controller value from VBT and generalize
- */
- panel->backlight.controller = 0;
+ panel->backlight.controller = dev_priv->vbt.backlight.controller;
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- /* Keeping the check if controller 1 is to be programmed.
- * This will come into affect once the VBT parsing
- * is fixed for controller selection, and controller 1 is used
- * for a prticular display configuration.
- */
+ /* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
@@ -1756,7 +1742,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight;
panel->backlight.disable = bxt_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
new file mode 100644
index 000000000000..c0b1f99da37b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -0,0 +1,1011 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include "intel_drv.h"
+
+struct pipe_crc_info {
+ const char *name;
+ struct drm_i915_private *dev_priv;
+ enum pipe pipe;
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated, we need to hook into the minor for release.
+ */
+static int drm_add_fake_info_node(struct drm_minor *minor,
+ struct dentry *ent, const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev_priv;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
+ return -ENODEV;
+
+ spin_lock_irq(&pipe_crc->lock);
+
+ if (pipe_crc->opened) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EBUSY; /* already open */
+ }
+
+ pipe_crc->opened = true;
+ filep->private_data = inode->i_private;
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev_priv;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->opened = false;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
+/* account for the terminating '\0' */
+#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
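
[Editor's note] The arithmetic checks out: six 8-character fields, five separating spaces, and the newline give 54 bytes, plus one for the terminator. A quick standalone confirmation:

	#include <stdio.h>

	int main(void)
	{
		/* 6 fields * 8 chars + 5 spaces + '\n' = 54; +1 for '\0' */
		char buf[6 * 8 + 5 + 1 + 1];
		int n = snprintf(buf, sizeof(buf), "%8u %8x %8x %8x %8x %8x\n",
				 1234u, 0xdeadbeefu, 0u, 0u, 0u, 0u);

		printf("%d\n", n);	/* prints 54 */
		return 0;
	}
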
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+ assert_spin_locked(&pipe_crc->lock);
+ return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR);
+}
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+ loff_t *pos)
+{
+ struct pipe_crc_info *info = filep->private_data;
+ struct drm_i915_private *dev_priv = info->dev_priv;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+ char buf[PIPE_CRC_BUFFER_LEN];
+ int n_entries;
+ ssize_t bytes_read;
+
+ /*
+ * Don't allow user space to provide buffers not big enough to hold
+ * a line of data.
+ */
+ if (count < PIPE_CRC_LINE_LEN)
+ return -EINVAL;
+
+ if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+ return 0;
+
+ /* nothing to read */
+ spin_lock_irq(&pipe_crc->lock);
+ while (pipe_crc_data_count(pipe_crc) == 0) {
+ int ret;
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+ pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+ if (ret) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return ret;
+ }
+ }
+
+ /* We now have one or more entries to read */
+ n_entries = count / PIPE_CRC_LINE_LEN;
+
+ bytes_read = 0;
+ while (n_entries > 0) {
+ struct intel_pipe_crc_entry *entry =
+ &pipe_crc->entries[pipe_crc->tail];
+
+ if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR) < 1)
+ break;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+ pipe_crc->tail = (pipe_crc->tail + 1) &
+ (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+
+ bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+ "%8u %8x %8x %8x %8x %8x\n",
+ entry->frame, entry->crc[0],
+ entry->crc[1], entry->crc[2],
+ entry->crc[3], entry->crc[4]);
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
+ return -EFAULT;
+
+ user_buf += PIPE_CRC_LINE_LEN;
+ n_entries--;
+
+ spin_lock_irq(&pipe_crc->lock);
+ }
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return bytes_read;
+}
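
[Editor's note] A minimal user-space consumer of this blocking read interface might look as follows. The debugfs path assumes the usual mount point and DRM minor 0, and a CRC source must have been selected beforehand via the control file added further down:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/dri/0/i915_pipe_A_crc", "r");
		unsigned int frame, crc[5];

		if (!f)
			return EXIT_FAILURE;

		/* each line is one entry; the read blocks until data arrives */
		while (fscanf(f, "%u %x %x %x %x %x", &frame,
			      &crc[0], &crc[1], &crc[2], &crc[3], &crc[4]) == 6)
			printf("frame %u crc0 0x%08x\n", frame, crc[0]);

		fclose(f);
		return EXIT_SUCCESS;
	}
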
+
+static const struct file_operations i915_pipe_crc_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_pipe_crc_open,
+ .read = i915_pipe_crc_read,
+ .release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+ {
+ .name = "i915_pipe_A_crc",
+ .pipe = PIPE_A,
+ },
+ {
+ .name = "i915_pipe_B_crc",
+ .pipe = PIPE_B,
+ },
+ {
+ .name = "i915_pipe_C_crc",
+ .pipe = PIPE_C,
+ },
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(minor->dev);
+ struct dentry *ent;
+ struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+ info->dev_priv = dev_priv;
+ ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+ &i915_pipe_crc_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+ "none",
+ "plane1",
+ "plane2",
+ "pf",
+ "pipe",
+ "TV",
+ "DP-B",
+ "DP-C",
+ "DP-D",
+ "auto",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+ return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ seq_printf(m, "%c %s\n", pipe_name(i),
+ pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+ return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, display_crc_ctl_show, inode->i_private);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ struct intel_digital_port *dig_port;
+ int ret = 0;
+
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_encoder(dev, encoder) {
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->pipe != pipe)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_TVOUT:
+ *source = INTEL_PIPE_CRC_SOURCE_TV;
+ break;
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ dig_port = enc_to_dig_port(&encoder->base);
+ switch (dig_port->port) {
+ case PORT_B:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_B;
+ break;
+ case PORT_C:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_C;
+ break;
+ case PORT_D:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_D;
+ break;
+ default:
+ WARN(1, "nonexisting DP port %c\n",
+ port_name(dig_port->port));
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_CHERRYVIEW(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ tmp |= DC_BALANCE_RESET_VLV;
+ switch (pipe) {
+ case PIPE_A:
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ break;
+ case PIPE_B:
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp |= PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return -EINVAL;
+ }
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ if (!SUPPORTS_TV(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ if (!IS_G4X(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ if (!IS_G4X(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_G4X(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ WARN_ON(!IS_G4X(dev_priv));
+
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+ if (pipe == PIPE_A)
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ switch (pipe) {
+ case PIPE_A:
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ break;
+ case PIPE_B:
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp &= ~PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return;
+ }
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+ tmp &= ~DC_BALANCE_RESET_VLV;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ if (pipe == PIPE_A)
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+ }
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ struct intel_crtc_state *pipe_config;
+ struct drm_atomic_state *state;
+ int ret = 0;
+
+ drm_modeset_lock_all(dev);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+ pipe_config = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto put_state;
+ }
+
+ pipe_config->pch_pfit.force_thru = enable;
+ if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+ pipe_config->pch_pfit.enabled != enable)
+ pipe_config->base.connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+
+put_state:
+ drm_atomic_state_put(state);
+unlock:
+ WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+ drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PF;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PF:
+ if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
+
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source, u32 *val)
+{
+ if (IS_GEN2(dev_priv))
+ return i8xx_pipe_crc_ctl_reg(source, val);
+ else if (INTEL_GEN(dev_priv) < 5)
+ return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ return ilk_pipe_crc_ctl_reg(source, val);
+ else
+ return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+}
+
+static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source source)
+{
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ enum intel_display_power_domain power_domain;
+ u32 val = 0; /* shut up gcc */
+ int ret;
+
+ if (pipe_crc->source == source)
+ return 0;
+
+ /* forbid changing the source without going back to 'none' */
+ if (pipe_crc->source && source)
+ return -EINVAL;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ return -EIO;
+ }
+
+ ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ if (ret != 0)
+ goto out;
+
+ /* none -> real source transition */
+ if (source) {
+ struct intel_pipe_crc_entry *entries;
+
+ DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+ pipe_name(pipe), pipe_crc_source_name(source));
+
+ entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
+ sizeof(pipe_crc->entries[0]),
+ GFP_KERNEL);
+ if (!entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ hsw_disable_ips(crtc);
+
+ spin_lock_irq(&pipe_crc->lock);
+ kfree(pipe_crc->entries);
+ pipe_crc->entries = entries;
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
+ spin_unlock_irq(&pipe_crc->lock);
+ }
+
+ pipe_crc->source = source;
+
+ I915_WRITE(PIPE_CRC_CTL(pipe), val);
+ POSTING_READ(PIPE_CRC_CTL(pipe));
+
+ /* real source -> none transition */
+ if (!source) {
+ struct intel_pipe_crc_entry *entries;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
+
+ DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+ pipe_name(pipe));
+
+ drm_modeset_lock(&crtc->base.mutex, NULL);
+ if (crtc->base.state->active)
+ intel_wait_for_vblank(dev_priv, pipe);
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ spin_lock_irq(&pipe_crc->lock);
+ entries = pipe_crc->entries;
+ pipe_crc->entries = NULL;
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ kfree(entries);
+
+ if (IS_G4X(dev_priv))
+ g4x_undo_pipe_scramble_reset(dev_priv, pipe);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
+
+ hsw_enable_ips(crtc);
+ }
+
+ ret = 0;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ * command: wsp* object wsp+ name wsp+ source wsp*
+ * object: 'pipe'
+ * name: (A | B | C)
+ * source: (none | plane1 | plane2 | pf)
+ * wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * e.g.:
+ * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
+ * "pipe A none" -> Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+ int n_words = 0;
+
+ while (*buf) {
+ char *end;
+
+ /* skip leading white space */
+ buf = skip_spaces(buf);
+ if (!*buf)
+ break; /* end of buffer */
+
+ /* find end of word */
+ for (end = buf; *end && !isspace(*end); end++)
+ ;
+
+ if (n_words == max_words) {
+ DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+ max_words);
+ return -EINVAL; /* ran out of words[] before bytes */
+ }
+
+ if (*end)
+ *end++ = '\0';
+ words[n_words++] = buf;
+ buf = end;
+ }
+
+ return n_words;
+}
+
+enum intel_pipe_crc_object {
+ PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+ "pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+ if (!strcmp(buf, pipe_crc_objects[i])) {
+ *o = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+ const char name = buf[0];
+
+ if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+ return -EINVAL;
+
+ *pipe = name - 'A';
+
+ return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+ int i;
+
+ if (!buf) {
+ *s = INTEL_PIPE_CRC_SOURCE_NONE;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+ if (!strcmp(buf, pipe_crc_sources[i])) {
+ *s = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
+ char *buf, size_t len)
+{
+#define N_WORDS 3
+ int n_words;
+ char *words[N_WORDS];
+ enum pipe pipe;
+ enum intel_pipe_crc_object object;
+ enum intel_pipe_crc_source source;
+
+ n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+ if (n_words != N_WORDS) {
+ DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+ N_WORDS);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+ DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+ DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+ return -EINVAL;
+ }
+
+ return pipe_crc_set_source(dev_priv, pipe, source);
+}
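
[Editor's note] Given the grammar above, arming and disarming capture from user space is one three-word command per write(); a sketch, again assuming the usual debugfs path and minor 0:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl",
			      O_WRONLY);
		int ret = 0;

		if (fd < 0)
			return 1;

		/* arm: capture pipe A CRCs at the pipe tap point */
		if (write(fd, "pipe A pipe", 11) != 11)
			ret = 1;

		/* ... consume entries from i915_pipe_A_crc here ... */

		/* disarm: the source must go back through "none" */
		if (write(fd, "pipe A none", 11) != 11)
			ret = 1;

		close(fd);
		return ret;
	}
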
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ char *tmpbuf;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(tmpbuf, ubuf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ tmpbuf[len] = '\0';
+
+ ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
+
+out:
+ kfree(tmpbuf);
+ if (ret < 0)
+ return ret;
+
+ *offp += len;
+ return len;
+}
+
+const struct file_operations i915_display_crc_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = display_crc_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = display_crc_ctl_write
+};
+
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+
+ pipe_crc->opened = false;
+ spin_lock_init(&pipe_crc->lock);
+ init_waitqueue_head(&pipe_crc->wq);
+ }
+}
+
+int intel_pipe_crc_create(struct drm_minor *minor)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_pipe_crc_cleanup(struct drm_minor *minor)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *)&i915_pipe_crc_data[i];
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
+}
+
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum intel_display_power_domain power_domain;
+ enum intel_pipe_crc_source source;
+ u32 val = 0; /* shut up gcc */
+ int ret = 0;
+
+ if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ power_domain = POWER_DOMAIN_PIPE(crtc->index);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ return -EIO;
+ }
+
+ ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
+ if (ret != 0)
+ goto out;
+
+ if (source) {
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ hsw_disable_ips(intel_crtc);
+ }
+
+ I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
+ POSTING_READ(PIPE_CRC_CTL(crtc->index));
+
+ if (!source) {
+ if (IS_G4X(dev_priv))
+ g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
+ else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
+
+ hsw_enable_ips(intel_crtc);
+ }
+
+ pipe_crc->skipped = 0;
+ *values_cnt = 5;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ae2c0bb4b2e8..249623d45be0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -312,23 +312,30 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
+ bool was_enabled;
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
- dev_priv->wm.vlv.cxsr = enable;
- } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
+ } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
+ was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
POSTING_READ(FW_BLC_SELF);
} else if (IS_PINEVIEW(dev_priv)) {
- val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
- val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+ val = I915_READ(DSPFW3);
+ was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
+ if (enable)
+ val |= PINEVIEW_SELF_REFRESH_EN;
+ else
+ val &= ~PINEVIEW_SELF_REFRESH_EN;
I915_WRITE(DSPFW3, val);
POSTING_READ(DSPFW3);
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
+ was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
@@ -339,17 +346,33 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
* and yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
+ was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
I915_WRITE(INSTPM, val);
POSTING_READ(INSTPM);
} else {
- return;
+ return false;
}
- DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
+ DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
+ enableddisabled(enable),
+ enableddisabled(was_enabled));
+
+ return was_enabled;
}
+bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+ bool ret;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(dev_priv, enable);
+ dev_priv->wm.vlv.cxsr = enable;
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+
+ return ret;
+}
/*
* Latency for FIFO fetches is dependent on several factors:
@@ -370,12 +393,15 @@ static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
- enum pipe pipe, int plane)
+static int vlv_get_fifo_size(struct intel_plane *plane)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int sprite0_start, sprite1_start, size;
- switch (pipe) {
+ if (plane->id == PLANE_CURSOR)
+ return 63;
+
+ switch (plane->pipe) {
uint32_t dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
@@ -399,24 +425,21 @@ static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
return 0;
}
- switch (plane) {
- case 0:
+ switch (plane->id) {
+ case PLANE_PRIMARY:
size = sprite0_start;
break;
- case 1:
+ case PLANE_SPRITE0:
size = sprite1_start - sprite0_start;
break;
- case 2:
+ case PLANE_SPRITE1:
size = 512 - 1 - sprite1_start;
break;
default:
return 0;
}
- DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
- pipe_name(pipe), plane == 0 ? "primary" : "sprite",
- plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
- size);
+ DRM_DEBUG_KMS("%s FIFO size: %d\n", plane->base.name, size);
return size;
}
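
[Editor's note] For clarity on VLV_FIFO_START() as used above: each FIFO split point is a 9-bit value whose low 8 bits sit in DSPARB and whose ninth bit sits in DSPARB2. A standalone decode with made-up register contents:

	#include <stdint.h>
	#include <stdio.h>

	#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
		((((dsparb) >> (lo_shift)) & 0xff) | \
		 ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

	int main(void)
	{
		/* made-up register contents, purely for illustration */
		uint32_t dsparb = 0x000000a0;	/* low 8 bits at shift 0 */
		uint32_t dsparb2 = 0x00000001;	/* ninth bit at shift 0 */

		printf("sprite0_start = %d\n",
		       VLV_FIFO_START(dsparb, dsparb2, 0, 0)); /* 0xa0 | 0x100 = 416 */
		return 0;
	}
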
@@ -652,7 +675,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
&crtc->config->base.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = fb->format->cpp[0];
int clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -727,7 +750,7 @@ static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = crtc->config->pipe_src_w;
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
@@ -816,7 +839,7 @@ static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = crtc->config->pipe_src_w;
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -842,71 +865,77 @@ static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct intel_crtc *crtc,
+static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
const struct vlv_wm_values *wm)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
- I915_WRITE(VLV_DDL(pipe),
- (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
+ for_each_pipe(dev_priv, pipe) {
+ I915_WRITE(VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ }
+
+ /*
+ * Zero the (unused) WM1 watermarks, and also clear all the
+ * high order bits so that there are no out of bounds values
+ * present in the registers during the reprogramming.
+ */
+ I915_WRITE(DSPHOWM, 0);
+ I915_WRITE(DSPHOWM1, 0);
+ I915_WRITE(DSPFW4, 0);
+ I915_WRITE(DSPFW5, 0);
+ I915_WRITE(DSPFW6, 0);
I915_WRITE(DSPFW1,
FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
I915_WRITE(DSPFW2,
- FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
I915_WRITE(DSPFW3,
FW_WM(wm->sr.cursor, CURSOR_SR));
if (IS_CHERRYVIEW(dev_priv)) {
I915_WRITE(DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
I915_WRITE(DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
I915_WRITE(DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
- FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
I915_WRITE(DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
I915_WRITE(DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
I915_WRITE(DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- /* zero (unused) WM1 watermarks */
- I915_WRITE(DSPFW4, 0);
- I915_WRITE(DSPFW5, 0);
- I915_WRITE(DSPFW6, 0);
- I915_WRITE(DSPHOWM1, 0);
-
POSTING_READ(DSPFW1);
}
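
[Editor's note] The DSPHOWM traffic above exists because VLV/CHV watermark values are wider than the 8-bit fields in the DSPFW registers: the low 8 bits go through FW_WM_VLV()/FW_WM() into DSPFW*, while bit 8 (bit 9 for the SR plane) is carried separately via the >> 8 (>> 9) terms. A toy illustration of the split:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t wm = 0x153;		/* a 9-bit watermark value */
		uint8_t low = wm & 0xff;	/* lands in a DSPFW field */
		uint8_t hi = (wm >> 8) & 0x1;	/* lands in DSPHOWM */

		printf("low=0x%02x hi=%u -> %u\n", low, hi, (hi << 8) | low);
		return 0;
	}
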
@@ -949,24 +978,26 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
- struct intel_crtc *crtc,
- const struct intel_plane_state *state,
+static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int clock, htotal, cpp, width, wm;
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
- if (!state->base.visible)
+ if (!plane_state->base.visible)
return 0;
- cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
- clock = crtc->config->base.adjusted_mode.crtc_clock;
- htotal = crtc->config->base.adjusted_mode.crtc_htotal;
- width = crtc->config->pipe_src_w;
+ cpp = plane_state->base.fb->format->cpp[0];
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->crtc_htotal;
+ width = crtc_state->pipe_src_w;
if (WARN_ON(htotal == 0))
htotal = 1;
@@ -1004,7 +1035,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
if (state->base.visible) {
wm_state->num_active_planes++;
- total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ total_rate += state->base.fb->format->cpp[0];
}
}
@@ -1023,7 +1054,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
continue;
}
- rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ rate = state->base.fb->format->cpp[0];
plane->wm.fifo_size = fifo_size * rate / total_rate;
fifo_left -= plane->wm.fifo_size;
}
@@ -1053,48 +1084,45 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
WARN_ON(fifo_left != 0);
}
+static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
+{
+ if (wm > fifo_size)
+ return USHRT_MAX;
+ else
+ return fifo_size - wm;
+}
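
[Editor's note] vlv_invert_wm_value() converts the computed watermark into the inverted form the registers expect (fifo_size minus the computed value), saturating to USHRT_MAX when the value exceeds the plane's FIFO share so the caller trims that level away. A quick worked example:

	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t invert(uint16_t wm, uint16_t fifo_size)
	{
		return wm > fifo_size ? USHRT_MAX : fifo_size - wm;
	}

	int main(void)
	{
		printf("%u\n", invert(20, 511));	/* 491 */
		printf("%u\n", invert(600, 511));	/* 65535: unusable */
		return 0;
	}
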
+
static void vlv_invert_wms(struct intel_crtc *crtc)
{
struct vlv_wm_state *wm_state = &crtc->wm_state;
int level;
for (level = 0; level < wm_state->num_levels; level++) {
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const int sr_fifo_size =
- INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
+ INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
struct intel_plane *plane;
- wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
- wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
-
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- switch (plane->base.type) {
- int sprite;
- case DRM_PLANE_TYPE_CURSOR:
- wm_state->wm[level].cursor = plane->wm.fifo_size -
- wm_state->wm[level].cursor;
- break;
- case DRM_PLANE_TYPE_PRIMARY:
- wm_state->wm[level].primary = plane->wm.fifo_size -
- wm_state->wm[level].primary;
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- sprite = plane->plane;
- wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
- wm_state->wm[level].sprite[sprite];
- break;
- }
+ wm_state->sr[level].plane =
+ vlv_invert_wm_value(wm_state->sr[level].plane,
+ sr_fifo_size);
+ wm_state->sr[level].cursor =
+ vlv_invert_wm_value(wm_state->sr[level].cursor,
+ 63);
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ wm_state->wm[level].plane[plane->id] =
+ vlv_invert_wm_value(wm_state->wm[level].plane[plane->id],
+ plane->wm.fifo_size);
}
}
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
- int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
int level;
memset(wm_state, 0, sizeof(*wm_state));
@@ -1109,45 +1137,27 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
if (wm_state->num_active_planes != 1)
wm_state->cxsr = false;
- if (wm_state->cxsr) {
- for (level = 0; level < wm_state->num_levels; level++) {
- wm_state->sr[level].plane = sr_fifo_size;
- wm_state->sr[level].cursor = 63;
- }
- }
-
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *state =
to_intel_plane_state(plane->base.state);
+ int level;
if (!state->base.visible)
continue;
/* normal watermarks */
for (level = 0; level < wm_state->num_levels; level++) {
- int wm = vlv_compute_wm_level(plane, crtc, state, level);
- int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+ int wm = vlv_compute_wm_level(crtc->config, state, level);
+ int max_wm = plane->wm.fifo_size;
/* hack */
if (WARN_ON(level == 0 && wm > max_wm))
wm = max_wm;
- if (wm > plane->wm.fifo_size)
+ if (wm > max_wm)
break;
- switch (plane->base.type) {
- int sprite;
- case DRM_PLANE_TYPE_CURSOR:
- wm_state->wm[level].cursor = wm;
- break;
- case DRM_PLANE_TYPE_PRIMARY:
- wm_state->wm[level].primary = wm;
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- sprite = plane->plane;
- wm_state->wm[level].sprite[sprite] = wm;
- break;
- }
+ wm_state->wm[level].plane[plane->id] = wm;
}
wm_state->num_levels = level;
@@ -1156,26 +1166,15 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
continue;
/* maxfifo watermarks */
- switch (plane->base.type) {
- int sprite, level;
- case DRM_PLANE_TYPE_CURSOR:
+ if (plane->id == PLANE_CURSOR) {
for (level = 0; level < wm_state->num_levels; level++)
wm_state->sr[level].cursor =
- wm_state->wm[level].cursor;
- break;
- case DRM_PLANE_TYPE_PRIMARY:
- for (level = 0; level < wm_state->num_levels; level++)
- wm_state->sr[level].plane =
- min(wm_state->sr[level].plane,
- wm_state->wm[level].primary);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- sprite = plane->plane;
+ wm_state->wm[level].plane[PLANE_CURSOR];
+ } else {
for (level = 0; level < wm_state->num_levels; level++)
wm_state->sr[level].plane =
- min(wm_state->sr[level].plane,
- wm_state->wm[level].sprite[sprite]);
- break;
+ max(wm_state->sr[level].plane,
+ wm_state->wm[level].plane[plane->id]);
}
}
@@ -1199,17 +1198,23 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
- if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
- WARN_ON(plane->wm.fifo_size != 63);
- continue;
- }
-
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ switch (plane->id) {
+ case PLANE_PRIMARY:
sprite0_start = plane->wm.fifo_size;
- else if (plane->plane == 0)
+ break;
+ case PLANE_SPRITE0:
sprite1_start = sprite0_start + plane->wm.fifo_size;
- else
+ break;
+ case PLANE_SPRITE1:
fifo_size = sprite1_start + plane->wm.fifo_size;
+ break;
+ case PLANE_CURSOR:
+ WARN_ON(plane->wm.fifo_size != 63);
+ break;
+ default:
+ MISSING_CASE(plane->id);
+ break;
+ }
}
WARN_ON(fifo_size != 512 - 1);
@@ -1218,6 +1223,8 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
pipe_name(crtc->pipe), sprite0_start,
sprite1_start, fifo_size);
+ spin_lock(&dev_priv->wm.dsparb_lock);
+
switch (crtc->pipe) {
uint32_t dsparb, dsparb2, dsparb3;
case PIPE_A:
@@ -1274,20 +1281,24 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
default:
break;
}
+
+ POSTING_READ(DSPARB);
+
+ spin_unlock(&dev_priv->wm.dsparb_lock);
}
#undef VLV_FIFO
-static void vlv_merge_wm(struct drm_device *dev,
+static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_crtcs = 0;
- wm->level = to_i915(dev)->wm.max_level;
+ wm->level = dev_priv->wm.max_level;
wm->cxsr = true;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm_state;
if (!crtc->active)
@@ -1306,7 +1317,7 @@ static void vlv_merge_wm(struct drm_device *dev,
if (num_active_crtcs > 1)
wm->level = VLV_WM_LEVEL_PM2;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct vlv_wm_state *wm_state = &crtc->wm_state;
enum pipe pipe = crtc->pipe;
@@ -1317,63 +1328,70 @@ static void vlv_merge_wm(struct drm_device *dev,
if (wm->cxsr)
wm->sr = wm_state->sr[wm->level];
- wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
}
}
+static bool is_disabling(int old, int new, int threshold)
+{
+ return old >= threshold && new < threshold;
+}
+
+static bool is_enabling(int old, int new, int threshold)
+{
+ return old < threshold && new >= threshold;
+}
+
static void vlv_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- struct vlv_wm_values wm = {};
+ struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
+ struct vlv_wm_values new_wm = {};
vlv_compute_wm(crtc);
- vlv_merge_wm(dev, &wm);
+ vlv_merge_wm(dev_priv, &new_wm);
- if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) {
/* FIXME should be part of crtc atomic commit */
vlv_pipe_set_fifo_size(crtc);
+
return;
}
- if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
- dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
chv_set_memory_dvfs(dev_priv, false);
- if (wm.level < VLV_WM_LEVEL_PM5 &&
- dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
chv_set_memory_pm5(dev_priv, false);
- if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
- intel_set_memory_cxsr(dev_priv, false);
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
/* FIXME should be part of crtc atomic commit */
vlv_pipe_set_fifo_size(crtc);
- vlv_write_wm_values(crtc, &wm);
+ vlv_write_wm_values(dev_priv, &new_wm);
DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
"sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
- pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
- wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
- wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+ pipe_name(pipe), new_wm.pipe[pipe].plane[PLANE_PRIMARY], new_wm.pipe[pipe].plane[PLANE_CURSOR],
+ new_wm.pipe[pipe].plane[PLANE_SPRITE0], new_wm.pipe[pipe].plane[PLANE_SPRITE1],
+ new_wm.sr.plane, new_wm.sr.cursor, new_wm.level, new_wm.cxsr);
- if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
- intel_set_memory_cxsr(dev_priv, true);
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
- if (wm.level >= VLV_WM_LEVEL_PM5 &&
- dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
chv_set_memory_pm5(dev_priv, true);
- if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
- dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
chv_set_memory_dvfs(dev_priv, true);
- dev_priv->wm.vlv = wm;
+ *old_wm = new_wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1455,7 +1473,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = crtc->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = fb->format->cpp[0];
unsigned long line_time_us;
int entries;
@@ -1541,7 +1559,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_GEN2(dev_priv))
cpp = 4;
else
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
@@ -1568,7 +1586,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_GEN2(dev_priv))
cpp = 4;
else
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
@@ -1621,7 +1639,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
cpp = 4;
else
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1781,13 +1799,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t mem_value,
bool is_lp)
{
- int cpp = pstate->base.fb ?
- drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
+ int cpp;
if (!cstate->base.active || !pstate->base.visible)
return 0;
+ cpp = pstate->base.fb->format->cpp[0];
+
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
if (!is_lp)
@@ -1809,13 +1828,14 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
- int cpp = pstate->base.fb ?
- drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
+ int cpp;
if (!cstate->base.active || !pstate->base.visible)
return 0;
+ cpp = pstate->base.fb->format->cpp[0];
+
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
@@ -1853,12 +1873,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t pri_val)
{
- int cpp = pstate->base.fb ?
- drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
+ int cpp;
if (!cstate->base.active || !pstate->base.visible)
return 0;
+ cpp = pstate->base.fb->format->cpp[0];
+
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
@@ -2867,28 +2888,6 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
#define SKL_SAGV_BLOCK_TIME 30 /* µs */
/*
- * Return the index of a plane in the SKL DDB and wm result arrays. Primary
- * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
- * other universal planes are in indices 1..n. Note that this may leave unused
- * indices between the top "sprite" plane and the cursor.
- */
-static int
-skl_wm_plane_id(const struct intel_plane *plane)
-{
- switch (plane->base.type) {
- case DRM_PLANE_TYPE_PRIMARY:
- return 0;
- case DRM_PLANE_TYPE_CURSOR:
- return PLANE_CURSOR;
- case DRM_PLANE_TYPE_OVERLAY:
- return plane->plane + 1;
- default:
- MISSING_CASE(plane->base.type);
- return plane->plane;
- }
-}
-
-/*
* FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
@@ -3010,7 +3009,6 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
struct intel_crtc *crtc;
struct intel_plane *plane;
struct intel_crtc_state *cstate;
- struct skl_plane_wm *wm;
enum pipe pipe;
int level, latency;
@@ -3037,7 +3035,8 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
return false;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
- wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];
+ struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane->id];
/* Skip this plane if it's not enabled */
if (!wm->wm[0].plane_en)
@@ -3140,28 +3139,29 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
- enum pipe pipe;
- int plane;
- u32 val;
+ struct intel_crtc *crtc;
memset(ddb, 0, sizeof(*ddb));
- for_each_pipe(dev_priv, pipe) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
enum intel_display_power_domain power_domain;
+ enum plane_id plane_id;
+ enum pipe pipe = crtc->pipe;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
- for_each_universal_plane(dev_priv, pipe, plane) {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane));
- skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
- val);
- }
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ u32 val;
- val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
- val);
+ if (plane_id != PLANE_CURSOR)
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ else
+ val = I915_READ(CUR_BUF_CFG(pipe));
+
+ skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
+ }
intel_display_power_put(dev_priv, power_domain);
}
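skl_wm_plane_id() disappears because each intel_plane now carries a precomputed ->id, and for_each_plane_id_on_crtc() walks the plane ids present on a given CRTC. A sketch of the assumed definitions, inferred from the names used throughout this patch:

enum plane_id {
	PLANE_PRIMARY,
	PLANE_SPRITE0,
	PLANE_SPRITE1,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};

/* plane_ids_mask is an assumed per-crtc bitmask of the ids above */
#define for_each_plane_id_on_crtc(__crtc, __p) \
	for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
		for_each_if((__crtc)->plane_ids_mask & BIT(__p))

Pinning the cursor to a fixed enumerator is what lets the loop above special-case PLANE_CURSOR for CUR_BUF_CFG while treating every other plane uniformly.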
@@ -3213,13 +3213,17 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
int y)
{
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
- struct drm_framebuffer *fb = pstate->fb;
uint32_t down_scale_amount, data_rate;
uint32_t width = 0, height = 0;
- unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
+ struct drm_framebuffer *fb;
+ u32 format;
if (!intel_pstate->base.visible)
return 0;
+
+ fb = pstate->fb;
+ format = fb->format->format;
+
if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
return 0;
if (y && format != DRM_FORMAT_NV12)
@@ -3235,13 +3239,13 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
data_rate = width * height *
- drm_format_plane_cpp(format, 0);
+ fb->format->cpp[0];
else /* uv-plane data rate */
data_rate = (width / 2) * (height / 2) *
- drm_format_plane_cpp(format, 1);
+ fb->format->cpp[1];
} else {
/* for packed formats */
- data_rate = width * height * drm_format_plane_cpp(format, 0);
+ data_rate = width * height * fb->format->cpp[0];
}
down_scale_amount = skl_plane_downscale_amount(intel_pstate);
@@ -3262,30 +3266,28 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
struct drm_plane *plane;
- const struct intel_plane *intel_plane;
const struct drm_plane_state *pstate;
- unsigned int rate, total_data_rate = 0;
- int id;
+ unsigned int total_data_rate = 0;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
- id = skl_wm_plane_id(to_intel_plane(plane));
- intel_plane = to_intel_plane(plane);
+ enum plane_id plane_id = to_intel_plane(plane)->id;
+ unsigned int rate;
/* packed/uv */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 0);
- plane_data_rate[id] = rate;
+ plane_data_rate[plane_id] = rate;
total_data_rate += rate;
/* y-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 1);
- plane_y_data_rate[id] = rate;
+ plane_y_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -3307,7 +3309,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return 0;
/* For packed formats, no y-plane, return 0 */
- if (y && fb->pixel_format != DRM_FORMAT_NV12)
+ if (y && fb->format->format != DRM_FORMAT_NV12)
return 0;
/* For Non Y-tile return 8-blocks */
@@ -3322,15 +3324,15 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */
- if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
+ if (fb->format->format == DRM_FORMAT_NV12 && !y) {
src_w /= 2;
src_h /= 2;
}
- if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
- plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+ if (fb->format->format == DRM_FORMAT_NV12 && !y)
+ plane_bpp = fb->format->cpp[1];
else
- plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ plane_bpp = fb->format->cpp[0];
if (drm_rotation_90_or_270(pstate->rotation)) {
switch (plane_bpp) {
@@ -3364,17 +3366,16 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
struct drm_plane *plane;
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int id = skl_wm_plane_id(intel_plane);
+ enum plane_id plane_id = to_intel_plane(plane)->id;
- if (id == PLANE_CURSOR)
+ if (plane_id == PLANE_CURSOR)
continue;
if (!pstate->visible)
continue;
- minimum[id] = skl_ddb_min_alloc(pstate, 0);
- y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
}
minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -3394,8 +3395,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t minimum[I915_MAX_PLANES] = {};
uint16_t y_minimum[I915_MAX_PLANES] = {};
unsigned int total_data_rate;
+ enum plane_id plane_id;
int num_active;
- int id, i;
unsigned plane_data_rate[I915_MAX_PLANES] = {};
unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
@@ -3426,9 +3427,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* proportional to the data rate.
*/
- for (i = 0; i < I915_MAX_PLANES; i++) {
- alloc_size -= minimum[i];
- alloc_size -= y_minimum[i];
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ alloc_size -= minimum[plane_id];
+ alloc_size -= y_minimum[plane_id];
}
ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
@@ -3447,28 +3448,28 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
start = alloc->start;
- for (id = 0; id < I915_MAX_PLANES; id++) {
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
- if (id == PLANE_CURSOR)
+ if (plane_id == PLANE_CURSOR)
continue;
- data_rate = plane_data_rate[id];
+ data_rate = plane_data_rate[plane_id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
* promote the expression to 64 bits to avoid overflowing; the
* result is < alloc_size since data_rate / total_data_rate < 1
*/
- plane_blocks = minimum[id];
+ plane_blocks = minimum[plane_id];
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
/* Leave disabled planes at (0,0) */
if (data_rate) {
- ddb->plane[pipe][id].start = start;
- ddb->plane[pipe][id].end = start + plane_blocks;
+ ddb->plane[pipe][plane_id].start = start;
+ ddb->plane[pipe][plane_id].end = start + plane_blocks;
}
start += plane_blocks;
@@ -3476,15 +3477,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/*
* allocation for y_plane part of planar format:
*/
- y_data_rate = plane_y_data_rate[id];
+ y_data_rate = plane_y_data_rate[plane_id];
- y_plane_blocks = y_minimum[id];
+ y_plane_blocks = y_minimum[plane_id];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
total_data_rate);
if (y_data_rate) {
- ddb->y_plane[pipe][id].start = start;
- ddb->y_plane[pipe][id].end = start + y_plane_blocks;
+ ddb->y_plane[pipe][plane_id].start = start;
+ ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
}
start += y_plane_blocks;
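For scale, the proportional split in this hunk behaves as follows (illustrative numbers and a hypothetical helper name; only the arithmetic mirrors skl_allocate_pipe_ddb()):

/* One plane's share of the remaining DDB space; div_u64() is the
 * same 64-bit promotion used above.
 */
static uint16_t ddb_share(uint16_t minimum, uint16_t alloc_size,
			  unsigned int data_rate,
			  unsigned int total_data_rate)
{
	return minimum + div_u64((uint64_t)alloc_size * data_rate,
				 total_data_rate);
}

With 480 blocks left after the minimums, a plane carrying 1000 of a 4000 total data rate receives minimum + 480 * 1000 / 4000 = minimum + 120 blocks. The promotion matters because data_rate is on the order of width * height * cpp, so the product with alloc_size can exceed 32 bits.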
@@ -3499,32 +3500,35 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* should allow pixel_rate up to ~2 GHz which seems sufficient since max
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
-static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
+static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
+ uint32_t latency)
{
- uint32_t wm_intermediate_val, ret;
+ uint32_t wm_intermediate_val;
+ uint_fixed_16_16_t ret;
if (latency == 0)
- return UINT_MAX;
-
- wm_intermediate_val = latency * pixel_rate * cpp / 512;
- ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
+ return FP_16_16_MAX;
+ wm_intermediate_val = latency * pixel_rate * cpp;
+ ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512);
return ret;
}
-static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
- uint32_t latency, uint32_t plane_blocks_per_line)
+static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
+ uint32_t pipe_htotal,
+ uint32_t latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
{
- uint32_t ret;
uint32_t wm_intermediate_val;
+ uint_fixed_16_16_t ret;
if (latency == 0)
- return UINT_MAX;
+ return FP_16_16_MAX;
wm_intermediate_val = latency * pixel_rate;
- ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
- plane_blocks_per_line;
-
+ wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
+ pipe_htotal * 1000);
+ ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);
return ret;
}
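Both methods now return fractional block counts in a 16.16 fixed-point type so that sub-block precision survives until the final round-up. The helpers are introduced elsewhere in this series; a sketch of the assumed representation, with names and semantics inferred from the call sites and overflow checks omitted:

typedef struct {
	uint32_t val;	/* upper 16 bits integer, lower 16 bits fraction */
} uint_fixed_16_16_t;

#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = ~0u })

static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
{
	return (uint_fixed_16_16_t){ .val = val << 16 };
}

static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return DIV_ROUND_UP(fp.val, 1 << 16);
}

static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
						     uint_fixed_16_16_t mul)
{
	/* u32 times 16.16 stays 16.16 scaled */
	return (uint_fixed_16_16_t){ .val = val * mul.val };
}

static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val,
							      uint32_t d)
{
	/* scale up before dividing so the fraction survives */
	return (uint_fixed_16_16_t){
		.val = (uint32_t)DIV_ROUND_UP_ULL((uint64_t)val << 16, d),
	};
}

min_fixed_16_16()/max_fixed_16_16() can then compare .val directly, and res_lines = DIV_ROUND_UP(selected_result.val, plane_blocks_per_line.val) further down works because the two 16.16 scale factors cancel in the division.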
@@ -3564,24 +3568,36 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
struct drm_plane_state *pstate = &intel_pstate->base;
struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
- uint32_t method1, method2;
- uint32_t plane_bytes_per_line, plane_blocks_per_line;
+ uint_fixed_16_16_t method1, method2;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t selected_result;
+ uint32_t interm_pbpl;
+ uint32_t plane_bytes_per_line;
uint32_t res_blocks, res_lines;
- uint32_t selected_result;
uint8_t cpp;
uint32_t width = 0, height = 0;
uint32_t plane_pixel_rate;
- uint32_t y_tile_minimum, y_min_scanlines;
+ uint_fixed_16_16_t y_tile_minimum;
+ uint32_t y_min_scanlines;
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
+ bool y_tiled, x_tiled;
if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
*enabled = false;
return 0;
}
- if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED)
+ y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED;
+ x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
+
+ /* Display WA #1141: kbl. */
+ if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled)
+ latency += 4;
+
+ if (apply_memory_bw_wa && x_tiled)
latency += 15;
width = drm_rect_width(&intel_pstate->base.src) >> 16;
@@ -3590,13 +3606,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
if (drm_rotation_90_or_270(pstate->rotation)) {
- int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
- drm_format_plane_cpp(fb->pixel_format, 1) :
- drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
+ fb->format->cpp[1] :
+ fb->format->cpp[0];
switch (cpp) {
case 1:
@@ -3620,16 +3636,17 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
y_min_scanlines *= 2;
plane_bytes_per_line = width * cpp;
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
+ if (y_tiled) {
+ interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
+ y_min_scanlines, 512);
plane_blocks_per_line =
- DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
- plane_blocks_per_line /= y_min_scanlines;
- } else if (fb->modifier == DRM_FORMAT_MOD_NONE) {
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
- + 1;
+ fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines);
+ } else if (x_tiled) {
+ interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
} else {
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
+ plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
}
method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
@@ -3638,28 +3655,29 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
latency,
plane_blocks_per_line);
- y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
+ y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
+ plane_blocks_per_line);
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
- selected_result = max(method2, y_tile_minimum);
+ if (y_tiled) {
+ selected_result = max_fixed_16_16(method2, y_tile_minimum);
} else {
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1))
selected_result = method2;
- else if ((ddb_allocation / plane_blocks_per_line) >= 1)
- selected_result = min(method1, method2);
+ else if ((ddb_allocation /
+ fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
+ selected_result = min_fixed_16_16(method1, method2);
else
selected_result = method1;
}
- res_blocks = selected_result + 1;
- res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
+ res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
+ res_lines = DIV_ROUND_UP(selected_result.val,
+ plane_blocks_per_line.val);
if (level >= 1 && level <= 7) {
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
- res_blocks += y_tile_minimum;
+ if (y_tiled) {
+ res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
res_lines += y_min_scanlines;
} else {
res_blocks++;
@@ -3676,12 +3694,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (level) {
return 0;
} else {
+ struct drm_plane *plane = pstate->plane;
+
DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
- DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
- to_intel_crtc(cstate->base.crtc)->pipe,
- skl_wm_plane_id(to_intel_plane(pstate->plane)),
+ DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
+ plane->base.id, plane->name,
res_blocks, ddb_allocation, res_lines);
-
return -EINVAL;
}
}
@@ -3708,7 +3726,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
int ret;
- int i = skl_wm_plane_id(intel_plane);
if (state)
intel_pstate =
@@ -3731,7 +3748,7 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
WARN_ON(!intel_pstate->base.fb);
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
ret = skl_compute_plane_wm(dev_priv,
cstate,
@@ -3750,7 +3767,10 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
uint32_t pixel_rate;
+ uint32_t linetime_wm;
if (!cstate->base.active)
return 0;
@@ -3760,8 +3780,14 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
if (WARN_ON(pixel_rate == 0))
return 0;
- return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
- pixel_rate);
+ linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal *
+ 1000, pixel_rate);
+
+ /* Display WA #1135: bxt. */
+ if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
+ linetime_wm = DIV_ROUND_UP(linetime_wm, 2);
+
+ return linetime_wm;
}
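Worked example for the linetime value (illustrative timing; the 1/8 µs unit and kHz pixel rate are inferred from the factor of 8 above, so treat them as assumptions): a 1920x1080@60 mode with crtc_htotal = 2200 and a 148500 kHz pixel clock gives DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, i.e. a line time of about 14.9 µs expressed in eighths of a microsecond. With the BXT IPC workaround the programmed value is halved, here to 60.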
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
@@ -3794,7 +3820,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
for_each_intel_plane_mask(&dev_priv->drm,
intel_plane,
cstate->base.plane_mask) {
- wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];
+ wm = &pipe_wm->planes[intel_plane->id];
for (level = 0; level <= max_level; level++) {
ret = skl_compute_wm_level(dev_priv, ddb, cstate,
@@ -3838,7 +3864,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
const struct skl_plane_wm *wm,
const struct skl_ddb_allocation *ddb,
- int plane)
+ enum plane_id plane_id)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
@@ -3847,16 +3873,16 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
+ skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
&wm->wm[level]);
}
- skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
+ skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
- &ddb->plane[pipe][plane]);
- skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
- &ddb->y_plane[pipe][plane]);
+ skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
+ &ddb->plane[pipe][plane_id]);
+ skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
+ &ddb->y_plane[pipe][plane_id]);
}
static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
@@ -3961,17 +3987,16 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
struct drm_plane_state *plane_state;
struct drm_plane *plane;
enum pipe pipe = intel_crtc->pipe;
- int id;
WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
- id = skl_wm_plane_id(to_intel_plane(plane));
+ enum plane_id plane_id = to_intel_plane(plane)->id;
- if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
- &new_ddb->plane[pipe][id]) &&
- skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
- &new_ddb->y_plane[pipe][id]))
+ if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
+ &new_ddb->plane[pipe][plane_id]) &&
+ skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
+ &new_ddb->y_plane[pipe][plane_id]))
continue;
plane_state = drm_atomic_get_plane_state(state, plane);
@@ -4083,7 +4108,6 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
const struct intel_plane *intel_plane;
const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- int id;
int i;
for_each_crtc_in_state(state, crtc, cstate, i) {
@@ -4091,11 +4115,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
enum pipe pipe = intel_crtc->pipe;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ enum plane_id plane_id = intel_plane->id;
const struct skl_ddb_entry *old, *new;
- id = skl_wm_plane_id(intel_plane);
- old = &old_ddb->plane[pipe][id];
- new = &new_ddb->plane[pipe][id];
+ old = &old_ddb->plane[pipe][plane_id];
+ new = &new_ddb->plane[pipe][plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
@@ -4185,17 +4209,21 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
- int plane;
+ enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
- for_each_universal_plane(dev_priv, pipe, plane)
- skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane);
-
- skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb);
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id != PLANE_CURSOR)
+ skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
+ ddb, plane_id);
+ else
+ skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
+ ddb);
+ }
}
static void skl_initial_wm(struct intel_atomic_state *state,
@@ -4310,32 +4338,29 @@ static inline void skl_wm_level_from_reg_val(uint32_t val,
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
- struct skl_plane_wm *wm;
enum pipe pipe = intel_crtc->pipe;
- int level, id, max_level;
+ int level, max_level;
+ enum plane_id plane_id;
uint32_t val;
max_level = ilk_wm_max_level(dev_priv);
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- id = skl_wm_plane_id(intel_plane);
- wm = &out->planes[id];
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ struct skl_plane_wm *wm = &out->planes[plane_id];
for (level = 0; level <= max_level; level++) {
- if (id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM(pipe, id, level));
+ if (plane_id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM(pipe, plane_id, level));
else
val = I915_READ(CUR_WM(pipe, level));
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
- if (id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM_TRANS(pipe, id));
+ if (plane_id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
else
val = I915_READ(CUR_WM_TRANS(pipe));
@@ -4443,67 +4468,67 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
tmp = I915_READ(VLV_DDL(pipe));
- wm->ddl[pipe].primary =
+ wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].cursor =
+ wm->ddl[pipe].plane[PLANE_CURSOR] =
(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].sprite[0] =
+ wm->ddl[pipe].plane[PLANE_SPRITE0] =
(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].sprite[1] =
+ wm->ddl[pipe].plane[PLANE_SPRITE1] =
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
tmp = I915_READ(DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
- wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
- wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
- wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
tmp = I915_READ(DSPFW2);
- wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
- wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
- wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
tmp = I915_READ(DSPFW3);
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
if (IS_CHERRYVIEW(dev_priv)) {
tmp = I915_READ(DSPFW7_CHV);
- wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
tmp = I915_READ(DSPFW8_CHV);
- wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
- wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
tmp = I915_READ(DSPFW9_CHV);
- wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
- wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
+ wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
tmp = I915_READ(DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
- wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
- wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
- wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
tmp = I915_READ(DSPFW7);
- wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
tmp = I915_READ(DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
}
}
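Each watermark field is read in two steps above because the DSPFW registers hold only the low bits and DSPHOWM carries one extra high bit per value, shifted in by 8 (or 9 for the self-refresh plane). A sketch of the recombination, with a hypothetical helper name and widths inferred from the shifts in this hunk:

/* fw_bits: low field from a DSPFW register; howm_bit: matching HI bit
 * from DSPHOWM; hi_shift: 8 for plane/sprite values, 9 for SR.
 */
static u16 vlv_wm_recombine(u32 fw_bits, u32 howm_bit, int hi_shift)
{
	return fw_bits | (howm_bit << hi_shift);
}

So wm->pipe[PIPE_A].plane[PLANE_PRIMARY] above ends up as the 8-bit PLANEA field with PLANEA_HI as bit 8, giving 9-bit watermark values (10-bit for SR).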
@@ -4520,21 +4545,8 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
vlv_read_wm_values(dev_priv, wm);
- for_each_intel_plane(dev, plane) {
- switch (plane->base.type) {
- int sprite;
- case DRM_PLANE_TYPE_CURSOR:
- plane->wm.fifo_size = 63;
- break;
- case DRM_PLANE_TYPE_PRIMARY:
- plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- sprite = plane->plane;
- plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1);
- break;
- }
- }
+ for_each_intel_plane(dev, plane)
+ plane->wm.fifo_size = vlv_get_fifo_size(plane);
wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
@@ -4575,8 +4587,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
- pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
- wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0],
+ wm->pipe[pipe].plane[PLANE_SPRITE1]);
DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
@@ -4996,8 +5011,18 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
if (dev_priv->rps.cur_freq <= val)
return;
- /* Wake up the media well, as that takes a lot less
- * power than the Render well. */
+ /* The punit delays the write of the frequency and voltage until it
+ * determines the GPU is awake. During normal usage we don't want to
+ * waste power changing the frequency if the GPU is sleeping (rc6).
+ * However, the GPU and driver are now idle and we do not want to delay
+ * switching to minimum voltage (reducing power whilst idle) as we do
+ * not expect to be woken in the near future and so must flush the
+ * change by waking the device.
+ *
+ * We choose to take the media powerwell (either would do to trick the
+ * punit into committing the voltage change) as that takes a lot less
+ * power than the render powerwell.
+ */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
valleyview_set_rps(dev_priv, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
@@ -5219,7 +5244,7 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
if (!enable_rc6)
return 0;
- if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
DRM_INFO("RC6 disabled by BIOS\n");
return 0;
}
@@ -5253,7 +5278,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
/* All of these values are in units of 50MHz */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -5816,7 +5841,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
int pcbr_offset;
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
- pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
+ pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
pcbr_offset,
I915_GTT_OFFSET_NONE,
pctx_size);
@@ -5833,7 +5858,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
- pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
+ pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
goto out;
@@ -6784,7 +6809,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
goto out;
rcs = dev_priv->engine[RCS];
- if (rcs->last_context)
+ if (rcs->last_retired_context)
goto out;
if (!rcs->init_context)
@@ -7595,8 +7620,6 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-
I915_WRITE(MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
@@ -7633,7 +7656,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
@@ -7651,9 +7674,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
- else if (IS_CRESTLINE(dev_priv))
+ else if (IS_I965GM(dev_priv))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
- else if (IS_BROADWATER(dev_priv))
+ else if (IS_I965G(dev_priv))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
else if (IS_GEN3(dev_priv))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
@@ -7702,10 +7725,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}
- } else if (IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
} else if (IS_PINEVIEW(dev_priv)) {
@@ -7849,6 +7869,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
}
I915_WRITE_FW(GEN6_PCODE_DATA, val);
+ I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (intel_wait_for_register_fw(dev_priv,
@@ -8041,10 +8062,8 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
queue_work(req->i915->wq, &boost->work);
}
-void intel_pm_setup(struct drm_device *dev)
+void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
mutex_init(&dev_priv->rps.hw_lock);
spin_lock_init(&dev_priv->rps.client_lock);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c6be70686b4a..c3780d0d2baf 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -122,13 +122,26 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x3;
- psr_vsc.sdp_header.HB3 = 0xb;
+ if (dev_priv->psr.colorimetry_support &&
+ dev_priv->psr.y_cord_support) {
+ psr_vsc.sdp_header.HB2 = 0x5;
+ psr_vsc.sdp_header.HB3 = 0x13;
+ } else if (dev_priv->psr.y_cord_support) {
+ psr_vsc.sdp_header.HB2 = 0x4;
+ psr_vsc.sdp_header.HB3 = 0xe;
+ } else {
+ psr_vsc.sdp_header.HB2 = 0x3;
+ psr_vsc.sdp_header.HB3 = 0xc;
+ }
+
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
@@ -196,7 +209,11 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE);
-
+ /* Enable ALPM at the sink for PSR2 */
+ if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE);
if (dev_priv->psr.link_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
@@ -248,7 +265,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
VLV_EDP_PSR_ACTIVE_ENTRY);
}
-static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+static void intel_enable_source_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -299,14 +316,31 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
val |= EDP_PSR_TP1_TP2_SEL;
I915_WRITE(EDP_PSR_CTL, val);
+}
- if (!dev_priv->psr.psr2_support)
- return;
+static void intel_enable_source_psr2(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ /*
+ * Let's respect VBT in case VBT asks for a higher idle_frame value.
+ * Use 6 as the minimum to cover all known cases, including the
+ * off-by-one issue that HW has in some cases. Also there are
+ * cases where the sink should be able to train with just
+ * 5 or 6 idle patterns.
+ */
+ uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ uint32_t val;
+
+ val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
- val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+ val |= EDP_PSR2_ENABLE |
+ EDP_SU_TRACK_ENABLE |
+ EDP_FRAMES_BEFORE_SU_ENTRY;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR2_TP2_TIME_2500;
@@ -320,6 +354,19 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR2_CTL, val);
}
+static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /* PSR1 and PSR2 are mutually exclusive. */
+ if (dev_priv->psr.psr2_support)
+ intel_enable_source_psr2(intel_dp);
+ else
+ intel_enable_source_psr1(intel_dp);
+}
+
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -387,6 +434,22 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
+ if (intel_crtc->config->pipe_src_w > 3200 ||
+ intel_crtc->config->pipe_src_h > 2000) {
+ dev_priv->psr.psr2_support = false;
+ return false;
+ }
+
+ /*
+ * FIXME: enable PSR2 only for Y-coordinate PSR2 panels.
+ * After GTC implementation, remove this restriction.
+ */
+ if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
+ DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
+ return false;
+ }
+
dev_priv->psr.source_ok = true;
return true;
}
@@ -397,7 +460,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (dev_priv->psr.psr2_support)
+ WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+ else
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
@@ -426,6 +492,8 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ u32 chicken;
if (!HAS_PSR(dev_priv)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -449,26 +517,34 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
if (HAS_DDI(dev_priv)) {
- hsw_psr_setup_vsc(intel_dp);
-
if (dev_priv->psr.psr2_support) {
- /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
- if (crtc->config->pipe_src_w > 3200 ||
- crtc->config->pipe_src_h > 2000)
- dev_priv->psr.psr2_support = false;
- else
- skl_psr_setup_su_vsc(intel_dp);
+ skl_psr_setup_su_vsc(intel_dp);
+ chicken = PSR2_VSC_ENABLE_PROG_HEADER;
+ if (dev_priv->psr.y_cord_support)
+ chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
+ I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
+ I915_WRITE(EDP_PSR_DEBUG_CTL,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP |
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+ } else {
+ /* Set up the VSC header for PSR1 */
+ hsw_psr_setup_vsc(intel_dp);
+ /*
+ * Per spec: avoid continuous PSR exit by masking MEMUP
+ * and HPD. Also mask LPSP to avoid a dependency on other
+ * drivers that might block runtime_pm, besides preventing
+ * other HW tracking issues, now that we can rely on
+ * frontbuffer tracking.
+ */
+ I915_WRITE(EDP_PSR_DEBUG_CTL,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP);
}
- /*
- * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
- * Also mask LPSP to avoid dependency on other drivers that
- * might block runtime_pm besides preventing other hw tracking
- * issues now we can rely on frontbuffer tracking.
- */
- I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
@@ -544,20 +620,42 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
- I915_WRITE(EDP_PSR_CTL,
- I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+ i915_reg_t psr_status;
+ u32 psr_status_mask;
+
+ if (dev_priv->psr.aux_frame_sync)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+ 0);
+
+ if (dev_priv->psr.psr2_support) {
+ psr_status = EDP_PSR2_STATUS_CTL;
+ psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+
+ I915_WRITE(EDP_PSR2_CTL,
+ I915_READ(EDP_PSR2_CTL) &
+ ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
+
+ } else {
+ psr_status = EDP_PSR_STATUS_CTL;
+ psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+
+ I915_WRITE(EDP_PSR_CTL,
+ I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+ }
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS_CTL,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
+ psr_status, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
} else {
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (dev_priv->psr.psr2_support)
+ WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+ else
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
}
@@ -608,13 +706,24 @@ static void intel_psr_work(struct work_struct *work)
* and be ready for re-enable.
*/
if (HAS_DDI(dev_priv)) {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS_CTL,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
+ if (dev_priv->psr.psr2_support) {
+ if (intel_wait_for_register(dev_priv,
+ EDP_PSR2_STATUS_CTL,
+ EDP_PSR2_STATUS_STATE_MASK,
+ 0,
+ 50)) {
+ DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
+ return;
+ }
+ } else {
+ if (intel_wait_for_register(dev_priv,
+ EDP_PSR_STATUS_CTL,
+ EDP_PSR_STATUS_STATE_MASK,
+ 0,
+ 50)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
}
} else {
if (intel_wait_for_register(dev_priv,
@@ -656,11 +765,19 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
return;
if (HAS_DDI(dev_priv)) {
- val = I915_READ(EDP_PSR_CTL);
-
- WARN_ON(!(val & EDP_PSR_ENABLE));
-
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+ if (dev_priv->psr.aux_frame_sync)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+ 0);
+ if (dev_priv->psr.psr2_support) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+ } else {
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+ }
} else {
val = I915_READ(VLV_PSRCTL(pipe));
@@ -813,15 +930,13 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
/**
* intel_psr_init - Init basic PSR work and mutex.
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* This function is called only once at driver load to initialize basic
* PSR stuff.
*/
-void intel_psr_init(struct drm_device *dev)
+void intel_psr_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 91cb4c422ad5..91bc4abf5d3e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -599,10 +599,62 @@ out:
static void reset_ring_common(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- struct intel_ring *ring = request->ring;
+ /* Try to restore the logical GPU state to match the continuation
+ * of the request queue. If we skip the context/PD restore, then
+ * the next request may try to execute assuming that its context
+ * is valid and loaded on the GPU and so may try to access invalid
+ * memory, prompting repeated GPU hangs.
+ *
+ * If the request was guilty, we still restore the logical state
+ * in case the next request requires it (e.g. the aliasing ppgtt),
+ * but skip over the hung batch.
+ *
+ * If the request was innocent, we try to replay the request with
+ * the restored context.
+ */
+ if (request) {
+ struct drm_i915_private *dev_priv = request->i915;
+ struct intel_context *ce = &request->ctx->engine[engine->id];
+ struct i915_hw_ppgtt *ppgtt;
+
+ /* FIXME consider gen8 reset */
+
+ if (ce->state) {
+ I915_WRITE(CCID,
+ i915_ggtt_offset(ce->state) |
+ BIT(8) /* must be set! */ |
+ CCID_EXTENDED_STATE_SAVE |
+ CCID_EXTENDED_STATE_RESTORE |
+ CCID_EN);
+ }
- ring->head = request->postfix;
- ring->last_retired_head = -1;
+ ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+ if (ppgtt) {
+ u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
+
+ I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
+
+ /* Wait for the PD reload to complete */
+ if (intel_wait_for_register(dev_priv,
+ RING_PP_DIR_BASE(engine),
+ BIT(0), 0,
+ 10))
+ DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
+
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ }
+
+ /* If the rq hung, jump to its breadcrumb and skip the batch */
+ if (request->fence.error == -EIO) {
+ struct intel_ring *ring = request->ring;
+
+ ring->head = request->postfix;
+ ring->last_retired_head = -1;
+ }
+ } else {
+ engine->legacy_active_context = NULL;
+ }
}
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1728,7 +1780,7 @@ static int init_status_page(struct intel_engine_cs *engine)
void *vaddr;
int ret;
- obj = i915_gem_object_create_internal(engine->i915, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
@@ -1738,7 +1790,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
- vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -1769,7 +1821,7 @@ static int init_status_page(struct intel_engine_cs *engine)
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr = memset(vaddr, 0, 4096);
+ engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
@@ -1797,10 +1849,9 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
return 0;
}
-int intel_ring_pin(struct intel_ring *ring)
+int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
{
- /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
+ unsigned int flags;
enum i915_map_type map;
struct i915_vma *vma = ring->vma;
void *addr;
@@ -1810,6 +1861,9 @@ int intel_ring_pin(struct intel_ring *ring)
map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
+ flags = PIN_GLOBAL;
+ if (offset_bias)
+ flags |= PIN_OFFSET_BIAS | offset_bias;
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
@@ -1861,16 +1915,16 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
+ obj = i915_gem_object_create_stolen(dev_priv, size);
if (!obj)
- obj = i915_gem_object_create(&dev_priv->drm, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err;
@@ -1904,7 +1958,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
* of the buffer.
*/
ring->effective_size = size;
- if (IS_I830(engine->i915) || IS_845G(engine->i915))
+ if (IS_I830(engine->i915) || IS_I845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
ring->last_retired_head = -1;
@@ -1931,8 +1985,26 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static int intel_ring_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int context_pin(struct i915_gem_context *ctx, unsigned int flags)
+{
+ struct i915_vma *vma = ctx->engine[RCS].state;
+ int ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out.
+ * We only want to do this on the first bind so that we do not stall
+ * on an active context (which by nature is already on the GPU).
+ */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (ret)
+ return ret;
+ }
+
+ return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+}
+
+static int intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
int ret;
@@ -1943,13 +2015,15 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0;
if (ce->state) {
- struct i915_vma *vma;
+ unsigned int flags;
- vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
+ flags = 0;
+ if (i915_gem_context_is_kernel(ctx))
+ flags = PIN_HIGH;
+
+ ret = context_pin(ctx, flags);
+ if (ret)
goto error;
- }
}
/* The kernel context is only used as a placeholder for flushing the
@@ -1959,7 +2033,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
* as during eviction we cannot allocate and pin the renderstate in
* order to initialise the context.
*/
- if (ctx == ctx->i915->kernel_context)
+ if (i915_gem_context_is_kernel(ctx))
ce->initialised = true;
i915_gem_context_get(ctx);
@@ -1970,12 +2044,13 @@ error:
return ret;
}
-static void intel_ring_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void intel_ring_context_unpin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+ GEM_BUG_ON(ce->pin_count == 0);
if (--ce->pin_count)
return;
@@ -2000,17 +2075,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (ret)
goto error;
- /* We may need to do things with the shrinker which
- * require us to immediately switch back to the default
- * context. This can cause a problem as pinning the
- * default context also requires GTT space which may not
- * be available. To avoid this we always pin the default
- * context.
- */
- ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
- if (ret)
- goto error;
-
ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
@@ -2028,7 +2092,8 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
goto error;
}
- ret = intel_ring_pin(ring);
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
if (ret) {
intel_ring_free(ring);
goto error;
@@ -2069,8 +2134,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
- intel_ring_context_unpin(dev_priv->kernel_context, engine);
-
engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
@@ -2087,16 +2150,19 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
}
}
-int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
+static int ring_request_alloc(struct drm_i915_gem_request *request)
{
int ret;
+ GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
+
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
+ GEM_BUG_ON(!request->engine->buffer);
request->ring = request->engine->buffer;
ret = intel_ring_begin(request, 0);
@@ -2444,11 +2510,11 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
struct i915_vma *vma;
- obj = i915_gem_object_create(&dev_priv->drm, 4096);
+ obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
if (IS_ERR(obj))
goto err;
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err_obj;
@@ -2576,6 +2642,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->init_hw = init_ring_common;
engine->reset_hw = reset_ring_common;
+ engine->context_pin = intel_ring_context_pin;
+ engine->context_unpin = intel_ring_context_unpin;
+
+ engine->request_alloc = ring_request_alloc;
+
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
if (i915.semaphores) {
@@ -2600,7 +2671,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
- else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+ else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
engine->emit_bb_start = i830_emit_bb_start;
else
engine->emit_bb_start = i915_emit_bb_start;
@@ -2656,7 +2727,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
return ret;
if (INTEL_GEN(dev_priv) >= 6) {
- ret = intel_engine_create_scratch(engine, 4096);
+ ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3466b4e77e7c..79c2b8d72322 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -65,14 +65,37 @@ struct intel_hw_status_page {
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
enum intel_engine_hangcheck_action {
- HANGCHECK_IDLE = 0,
- HANGCHECK_WAIT,
- HANGCHECK_ACTIVE,
- HANGCHECK_KICK,
- HANGCHECK_HUNG,
+ ENGINE_IDLE = 0,
+ ENGINE_WAIT,
+ ENGINE_ACTIVE_SEQNO,
+ ENGINE_ACTIVE_HEAD,
+ ENGINE_ACTIVE_SUBUNITS,
+ ENGINE_WAIT_KICK,
+ ENGINE_DEAD,
};
-#define HANGCHECK_SCORE_RING_HUNG 31
+static inline const char *
+hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
+{
+ switch (a) {
+ case ENGINE_IDLE:
+ return "idle";
+ case ENGINE_WAIT:
+ return "wait";
+ case ENGINE_ACTIVE_SEQNO:
+ return "active seqno";
+ case ENGINE_ACTIVE_HEAD:
+ return "active head";
+ case ENGINE_ACTIVE_SUBUNITS:
+ return "active subunits";
+ case ENGINE_WAIT_KICK:
+ return "wait kick";
+ case ENGINE_DEAD:
+ return "dead";
+ }
+
+ return "unknown";
+}
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3
@@ -104,10 +127,11 @@ struct intel_instdone {
struct intel_engine_hangcheck {
u64 acthd;
u32 seqno;
- int score;
enum intel_engine_hangcheck_action action;
+ unsigned long action_timestamp;
int deadlock;
struct intel_instdone instdone;
+ bool stalled;
};
struct intel_ring {
@@ -242,6 +266,11 @@ struct intel_engine_cs {
void (*reset_hw)(struct intel_engine_cs *engine,
struct drm_i915_gem_request *req);
+ int (*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+ void (*context_unpin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+ int (*request_alloc)(struct drm_i915_gem_request *req);
int (*init_context)(struct drm_i915_gem_request *req);
int (*emit_flush)(struct drm_i915_gem_request *request,
@@ -355,7 +384,24 @@ struct intel_engine_cs {
bool preempt_wa;
u32 ctx_desc_template;
- struct i915_gem_context *last_context;
+ /* Contexts are pinned whilst they are active on the GPU. The last
+ * context executed remains active whilst the GPU is idle - the
+ * switch away and write to the context object only occur on the
+ * next execution. Contexts are only unpinned on retirement of the
+ * following request, ensuring that we can always write to the object
+ * on the context switch even after idling. Across suspend, we switch
+ * to the kernel context and trash it as the save may not happen
+ * before the hardware is powered down.
+ */
+ struct i915_gem_context *last_retired_context;
+
+ /* We track the current MI_SET_CONTEXT in order to eliminate
+ * redundant context switches. This presumes that requests are not
+ * reordered! Or, when they are, the tracking is updated along with
+ * the emission of individual requests into the legacy command
+ * stream (ring).
+ */
+ struct i915_gem_context *legacy_active_context;
struct intel_engine_hangcheck hangcheck;
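As a hedged illustration of the pinning rule described in the comment above — a context may only be unpinned once the request that follows it has retired — the retirement path conceptually amounts to something like this (simplified sketch, not the driver's actual retire code):

/* Sketch of unpin-on-retire: the previously retired context is now
 * safe to unpin, because a later request has completed and the
 * hardware has finished writing back its state.
 */
static void sketch_retire_context(struct intel_engine_cs *engine,
                                  struct i915_gem_context *ctx)
{
        if (engine->last_retired_context)
                engine->context_unpin(engine, engine->last_retired_context);

        engine->last_retired_context = ctx;
}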
@@ -437,7 +483,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
-int intel_ring_pin(struct intel_ring *ring);
+int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);
@@ -446,8 +492,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
-
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 87b4af092d54..c0b7e95b5b8e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -453,6 +453,57 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_MODESET) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
@@ -530,7 +581,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
@@ -694,7 +745,7 @@ gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
+ struct i915_power_well *power_well, bool enable)
{
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
@@ -720,11 +771,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
return;
}
break;
- case SKL_DISP_PW_DDI_A_E:
+ case SKL_DISP_PW_MISC_IO:
+ case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */
case SKL_DISP_PW_DDI_B:
case SKL_DISP_PW_DDI_C:
case SKL_DISP_PW_DDI_D:
- case SKL_DISP_PW_MISC_IO:
+ case GLK_DISP_PW_AUX_A:
+ case GLK_DISP_PW_AUX_B:
+ case GLK_DISP_PW_AUX_C:
break;
default:
WARN(1, "Unknown power well %lu\n", power_well->id);
@@ -884,6 +938,12 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
if (power_well->count > 0)
bxt_ddi_phy_verify_state(dev_priv, power_well->data);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
+ }
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -911,7 +971,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_assert_dbuf_enabled(dev_priv);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
bxt_verify_ddi_phy_power_wells(dev_priv);
}
@@ -2161,6 +2221,91 @@ static struct i915_power_well bxt_power_wells[] = {
},
};
+static struct i915_power_well glk_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .domains = 0,
+ .ops = &skl_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ },
+ {
+ .name = "DC off",
+ .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_PW_DC_OFF,
+ },
+ {
+ .name = "power well 2",
+ .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ },
+ {
+ .name = "dpio-common-a",
+ .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = BXT_DPIO_CMN_A,
+ .data = DPIO_PHY1,
+ },
+ {
+ .name = "dpio-common-b",
+ .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = BXT_DPIO_CMN_BC,
+ .data = DPIO_PHY0,
+ },
+ {
+ .name = "dpio-common-c",
+ .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = GLK_DPIO_CMN_C,
+ .data = DPIO_PHY2,
+ },
+ {
+ .name = "AUX A",
+ .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = GLK_DISP_PW_AUX_A,
+ },
+ {
+ .name = "AUX B",
+ .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = GLK_DISP_PW_AUX_B,
+ },
+ {
+ .name = "AUX C",
+ .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = GLK_DISP_PW_AUX_C,
+ },
+ {
+ .name = "DDI A power well",
+ .domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = GLK_DISP_PW_DDI_A,
+ },
+ {
+ .name = "DDI B power well",
+ .domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = SKL_DISP_PW_DDI_B,
+ },
+ {
+ .name = "DDI C power well",
+ .domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = SKL_DISP_PW_DDI_C,
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -2181,7 +2326,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
max_dc = 2;
mask = 0;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
max_dc = 1;
/*
* DC9 has a separate HW flow from the rest of the DC states,
@@ -2257,6 +2402,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, skl_power_wells);
} else if (IS_BROXTON(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ set_power_wells(power_domains, glk_power_wells);
} else if (IS_CHERRYVIEW(dev_priv)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv)) {
@@ -2585,7 +2732,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
skl_display_core_init(dev_priv, resume);
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
@@ -2624,7 +2771,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_display_core_uninit(dev_priv);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_display_core_uninit(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 27808e91cb5a..2ad13903a054 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1296,7 +1296,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv)) {
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -2342,9 +2342,9 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
}
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
+intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
@@ -2934,9 +2934,9 @@ static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
- struct drm_device *dev)
+ struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
@@ -2957,10 +2957,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
WARN_ON(port != PORT_B && port != PORT_C);
}
-bool intel_sdvo_init(struct drm_device *dev,
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t sdvo_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
@@ -2973,16 +2972,18 @@ bool intel_sdvo_init(struct drm_device *dev,
intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->port = port;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
+ intel_sdvo->slave_addr =
+ intel_sdvo_get_slave_addr(dev_priv, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
- if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev_priv))
goto err_i2c_bus;
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
intel_encoder->port = port;
- drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_sdvo_enc_funcs, 0,
"SDVO %c", port_name(port));
/* Read the regs to test if we can talk to the device */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 242a73e66d82..9ef54688872a 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -203,8 +203,8 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane + 1;
+ enum plane_id plane_id = intel_plane->id;
+ enum pipe pipe = intel_plane->pipe;
u32 plane_ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
@@ -223,15 +223,15 @@ skl_update_plane(struct drm_plane *drm_plane,
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
- plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= skl_plane_ctl_rotation(rotation);
if (key->flags) {
- I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
- I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
- I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+ I915_WRITE(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane_id), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
}
if (key->flags & I915_SET_COLORKEY_DESTINATION)
@@ -245,36 +245,36 @@ skl_update_plane(struct drm_plane *drm_plane,
crtc_w--;
crtc_h--;
- I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
- I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
- I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w);
+ I915_WRITE(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler;
- DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
- PS_PLANE_SEL(plane));
+ DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
+ plane_id, PS_PLANE_SEL(plane_id));
scaler = &crtc_state->scaler_state.scalers[scaler_id];
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
- PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
+ PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
((crtc_w + 1) << 16)|(crtc_h + 1));
- I915_WRITE(PLANE_POS(pipe, plane), 0);
+ I915_WRITE(PLANE_POS(pipe, plane_id), 0);
} else {
- I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+ I915_WRITE(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
}
- I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
- I915_WRITE(PLANE_SURF(pipe, plane),
+ I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
+ I915_WRITE(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- POSTING_READ(PLANE_SURF(pipe, plane));
+ POSTING_READ(PLANE_SURF(pipe, plane_id));
}
static void
@@ -283,20 +283,20 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane + 1;
+ enum plane_id plane_id = intel_plane->id;
+ enum pipe pipe = intel_plane->pipe;
- I915_WRITE(PLANE_CTL(pipe, plane), 0);
+ I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE(PLANE_SURF(pipe, plane), 0);
- POSTING_READ(PLANE_SURF(pipe, plane));
+ I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
+ POSTING_READ(PLANE_SURF(pipe, plane_id));
}
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- int plane = intel_plane->plane;
+ enum plane_id plane_id = intel_plane->id;
/* Seems RGB data bypasses the CSC always */
if (!format_is_yuv(format))
@@ -312,23 +312,23 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
* Cb and Cr apparently come in as signed already, so no
* need for any offset. For Y we need to remove the offset.
*/
- I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
- I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-
- I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
- I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
- I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
- I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
- I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
-
- I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
- I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
- I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
-
- I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+ I915_WRITE(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ I915_WRITE(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ I915_WRITE(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
+ I915_WRITE(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
+ I915_WRITE(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC8(plane_id), SPCSC_C0(8263));
+
+ I915_WRITE(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+ I915_WRITE(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+ I915_WRITE(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+ I915_WRITE(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
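For reference, the fixed-point constants programmed above are consistent with the standard BT.601 limited-range YCbCr-to-RGB matrix scaled by 4096 (a 4.12 format): 1.164 * 4096 ≈ 4769 (luma gain), 1.596 * 4096 ≈ 6537 (Cr into R), 0.813 * 4096 ≈ 3330 and 0.392 * 4096 ≈ 1605 (Cr/Cb into G), and 2.018 * 4096 ≈ 8263 (Cb into B). This reading is inferred from the values themselves; the patch does not state it.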
static void
@@ -340,8 +340,8 @@ vlv_update_plane(struct drm_plane *dplane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct drm_framebuffer *fb = plane_state->base.fb;
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane_id = intel_plane->id;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
unsigned int rotation = plane_state->base.rotation;
@@ -357,7 +357,7 @@ vlv_update_plane(struct drm_plane *dplane,
sprctl = SP_ENABLE;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_YUYV:
sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
break;
@@ -434,32 +434,32 @@ vlv_update_plane(struct drm_plane *dplane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (key->flags) {
- I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
- I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
- I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+ I915_WRITE(SPKEYMINVAL(pipe, plane_id), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane_id), key->channel_mask);
}
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
- chv_update_csc(intel_plane, fb->pixel_format);
+ chv_update_csc(intel_plane, fb->format->format);
- I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
- I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+ I915_WRITE(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
+ I915_WRITE(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+ I915_WRITE(SPTILEOFF(pipe, plane_id), (y << 16) | x);
else
- I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+ I915_WRITE(SPLINOFF(pipe, plane_id), linear_offset);
- I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
+ I915_WRITE(SPCONSTALPHA(pipe, plane_id), 0);
- I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
- I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_WRITE(SPSURF(pipe, plane),
+ I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPCNTR(pipe, plane_id), sprctl);
+ I915_WRITE(SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ(SPSURF(pipe, plane));
+ POSTING_READ(SPSURF(pipe, plane_id));
}
static void
@@ -468,13 +468,13 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane_id = intel_plane->id;
- I915_WRITE(SPCNTR(pipe, plane), 0);
+ I915_WRITE(SPCNTR(pipe, plane_id), 0);
- I915_WRITE(SPSURF(pipe, plane), 0);
- POSTING_READ(SPSURF(pipe, plane));
+ I915_WRITE(SPSURF(pipe, plane_id), 0);
+ POSTING_READ(SPSURF(pipe, plane_id));
}
static void
@@ -502,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
sprctl = SPRITE_ENABLE;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
break;
@@ -640,7 +640,7 @@ ilk_update_plane(struct drm_plane *plane,
dvscntr = DVS_ENABLE;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
break;
@@ -866,7 +866,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
- if (format_is_yuv(fb->pixel_format)) {
+ if (format_is_yuv(fb->format->format)) {
src_x &= ~1;
src_w &= ~1;
@@ -885,7 +885,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
/* Check size restrictions when scaling */
if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = fb->format->cpp[0];
WARN_ON(!can_scale);
@@ -1112,6 +1112,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->pipe = pipe;
intel_plane->plane = plane;
+ intel_plane->id = PLANE_SPRITE0 + plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 78cdfc6833d6..eb692e4ffe01 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1537,9 +1537,9 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
};
void
-intel_tv_init(struct drm_device *dev)
+intel_tv_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
new file mode 100644
index 000000000000..c46bc8594f22
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_uc.h"
+
+void intel_uc_init_early(struct drm_i915_private *dev_priv)
+{
+ mutex_init(&dev_priv->guc.send_mutex);
+}
+
+/*
+ * Read GuC command/status register (SOFT_SCRATCH_0)
+ * Return true if it contains a response rather than a command
+ */
+static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ u32 val = I915_READ(SOFT_SCRATCH(0));
+ *status = val;
+ return INTEL_GUC_RECV_IS_RESPONSE(val);
+}
+
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 status;
+ int i;
+ int ret;
+
+ if (WARN_ON(len < 1 || len > 15))
+ return -EINVAL;
+
+ mutex_lock(&guc->send_mutex);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ dev_priv->guc.action_count += 1;
+ dev_priv->guc.action_cmd = action[0];
+
+ for (i = 0; i < len; i++)
+ I915_WRITE(SOFT_SCRATCH(i), action[i]);
+
+ POSTING_READ(SOFT_SCRATCH(i - 1));
+
+ I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+
+ /*
+ * Fast commands should complete in less than 10us, so sample quickly
+ * up to that length of time, then switch to a slower sleep-wait loop.
+ * No intel_guc_send command should ever take longer than 10ms.
+ */
+ ret = wait_for_us(intel_guc_recv(guc, &status), 10);
+ if (ret)
+ ret = wait_for(intel_guc_recv(guc, &status), 10);
+ if (status != INTEL_GUC_STATUS_SUCCESS) {
+ /*
+ * Either the GuC explicitly returned an error (which
+ * we convert to -EIO here) or no response at all was
+ * received within the timeout limit (-ETIMEDOUT)
+ */
+ if (ret != -ETIMEDOUT)
+ ret = -EIO;
+
+ DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
+ " ret=%d status=0x%08X response=0x%08X\n",
+ action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+
+ dev_priv->guc.action_fail += 1;
+ dev_priv->guc.action_err = ret;
+ }
+ dev_priv->guc.action_status = status;
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&guc->send_mutex);
+
+ return ret;
+}
+
+int intel_guc_sample_forcewake(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 action[2];
+
+ action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
+ /* WaRsDisableCoarsePowerGating:skl,bxt */
+ if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+ action[1] = 0;
+ else
+ /* bits 0 and 1 are for the Render and Media domains separately */
+ action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_uc.h
index 0053258e03d3..d74f4d3ad8dc 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -21,13 +21,15 @@
* IN THE SOFTWARE.
*
*/
-#ifndef _INTEL_GUC_H_
-#define _INTEL_GUC_H_
+#ifndef _INTEL_UC_H_
+#define _INTEL_UC_H_
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"
+#include "i915_vma.h"
+
struct drm_i915_gem_request;
/*
@@ -74,7 +76,7 @@ struct i915_guc_client {
uint32_t proc_desc_offset;
uint32_t doorbell_offset;
- uint32_t cookie;
+ uint32_t doorbell_cookie;
uint16_t doorbell_id;
uint16_t padding[3]; /* Maintain alignment */
@@ -91,30 +93,35 @@ struct i915_guc_client {
uint64_t submissions[I915_NUM_ENGINES];
};
-enum intel_guc_fw_status {
- GUC_FIRMWARE_FAIL = -1,
- GUC_FIRMWARE_NONE = 0,
- GUC_FIRMWARE_PENDING,
- GUC_FIRMWARE_SUCCESS
+enum intel_uc_fw_status {
+ INTEL_UC_FIRMWARE_FAIL = -1,
+ INTEL_UC_FIRMWARE_NONE = 0,
+ INTEL_UC_FIRMWARE_PENDING,
+ INTEL_UC_FIRMWARE_SUCCESS
+};
+
+enum intel_uc_fw_type {
+ INTEL_UC_FW_TYPE_GUC,
+ INTEL_UC_FW_TYPE_HUC
};
/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the GuC.
*/
-struct intel_guc_fw {
- struct drm_device * guc_dev;
- const char * guc_fw_path;
- size_t guc_fw_size;
- struct drm_i915_gem_object * guc_fw_obj;
- enum intel_guc_fw_status guc_fw_fetch_status;
- enum intel_guc_fw_status guc_fw_load_status;
-
- uint16_t guc_fw_major_wanted;
- uint16_t guc_fw_minor_wanted;
- uint16_t guc_fw_major_found;
- uint16_t guc_fw_minor_found;
-
+struct intel_uc_fw {
+ const char *path;
+ size_t size;
+ struct drm_i915_gem_object *obj;
+ enum intel_uc_fw_status fetch_status;
+ enum intel_uc_fw_status load_status;
+
+ uint16_t major_ver_wanted;
+ uint16_t minor_ver_wanted;
+ uint16_t major_ver_found;
+ uint16_t minor_ver_found;
+
+ enum intel_uc_fw_type fw;
uint32_t header_size;
uint32_t header_offset;
uint32_t rsa_size;
@@ -140,10 +147,10 @@ struct intel_guc_log {
};
struct intel_guc {
- struct intel_guc_fw guc_fw;
+ struct intel_uc_fw fw;
struct intel_guc_log log;
- /* GuC2Host interrupt related state */
+ /* intel_guc_recv interrupt related state */
bool interrupts_enabled;
struct i915_vma *ads_vma;
@@ -165,17 +172,32 @@ struct intel_guc {
uint64_t submissions[I915_NUM_ENGINES];
uint32_t last_seqno[I915_NUM_ENGINES];
- /* To serialize the Host2GuC actions */
- struct mutex action_lock;
+ /* To serialize the intel_guc_send actions */
+ struct mutex send_mutex;
+};
+
+struct intel_huc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* HuC-specific additions */
};
+/* intel_uc.c */
+void intel_uc_init_early(struct drm_i915_private *dev_priv);
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_sample_forcewake(struct intel_guc *guc);
+
/* intel_guc_loader.c */
-extern void intel_guc_init(struct drm_device *dev);
-extern int intel_guc_setup(struct drm_device *dev);
-extern void intel_guc_fini(struct drm_device *dev);
-extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
-extern int intel_guc_suspend(struct drm_device *dev);
-extern int intel_guc_resume(struct drm_device *dev);
+extern void intel_guc_init(struct drm_i915_private *dev_priv);
+extern int intel_guc_setup(struct drm_i915_private *dev_priv);
+extern void intel_guc_fini(struct drm_i915_private *dev_priv);
+extern const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
+extern int intel_guc_suspend(struct drm_i915_private *dev_priv);
+extern int intel_guc_resume(struct drm_i915_private *dev_priv);
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw);
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
@@ -184,10 +206,26 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
-void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
-void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
-void i915_guc_register(struct drm_i915_private *dev_priv);
-void i915_guc_unregister(struct drm_i915_private *dev_priv);
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+
+/* intel_guc_log.c */
+void intel_guc_log_create(struct intel_guc *guc);
+void i915_guc_log_register(struct drm_i915_private *dev_priv);
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
+static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+{
+ u32 offset = i915_ggtt_offset(vma);
+ GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+ GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
+ return offset;
+}
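The two GEM_BUG_ON range checks reflect the GuC's restricted view of the GGTT: offsets below GUC_WOPCM_TOP are decoded by the GuC as WOPCM rather than GGTT, and the region from GUC_GGTT_TOP upwards is inaccessible to it, so every VMA handed to the firmware must fall strictly inside that window.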
+
+/* intel_huc.c */
+void intel_huc_init(struct drm_i915_private *dev_priv);
+void intel_huc_fini(struct drm_i915_private *dev_priv);
+int intel_huc_load(struct drm_i915_private *dev_priv);
+void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0bffd3f0c15d..abe08885a5ba 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -421,8 +421,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
- /* Enable Decoupled MMIO only on BXT C stepping onwards */
- if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
info->has_decoupled_mmio = false;
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
@@ -626,7 +625,14 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
dev_priv->uncore.fw_domains_table_entries,
fw_range_cmp);
- return entry ? entry->domains : 0;
+ if (!entry)
+ return 0;
+
+ WARN(entry->domains & ~dev_priv->uncore.fw_domains,
+ "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
+ entry->domains & ~dev_priv->uncore.fw_domains, offset);
+
+ return entry->domains;
}
static void
@@ -1813,7 +1819,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
return ironlake_do_reset;
else if (IS_G4X(dev_priv))
return g4x_do_reset;
- else if (IS_G33(dev_priv))
+ else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
return g33_do_reset;
else if (INTEL_INFO(dev_priv)->gen >= 3)
return i915_do_reset;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 8886cab19f98..a92e7762f596 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -399,10 +399,12 @@ struct lvds_dvo_timing {
u8 vblank_hi:4;
u8 vactive_hi:4;
u8 hsync_off_lo;
- u8 hsync_pulse_width;
- u8 vsync_pulse_width:4;
- u8 vsync_off:4;
- u8 rsvd0:6;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_off_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_off_hi:2;
+ u8 hsync_pulse_width_hi:2;
u8 hsync_off_hi:2;
u8 himage_lo;
u8 vimage_lo;
@@ -414,7 +416,7 @@ struct lvds_dvo_timing {
u8 digital:2;
u8 vsync_positive:1;
u8 hsync_positive:1;
- u8 rsvd2:1;
+ u8 non_interlaced:1;
} __packed;
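With the VBT pulse-width and offset fields now split into lo/hi bitfields, consumers reassemble the full values by shifting. A minimal sketch (hypothetical helper name; the vsync fields recombine analogously with a 4-bit shift):

/* Rebuild the 10-bit hsync pulse width from the split bitfields. */
static u16 sketch_dvo_hsync_pulse_width(const struct lvds_dvo_timing *t)
{
        return ((u16)t->hsync_pulse_width_hi << 8) | t->hsync_pulse_width_lo;
}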
struct lvds_pnp_id {
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 359cd2765552..f645275e6e63 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -207,8 +207,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct drm_encoder *encoder;
struct imx_hdmi *hdmi;
- struct resource *iores;
- int irq;
int ret;
if (!pdev->dev.of_node)
@@ -223,14 +221,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENXIO;
-
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
@@ -249,7 +239,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+ ret = dw_hdmi_bind(pdev, encoder, plat_data);
/*
* If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
@@ -264,7 +254,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
void *data)
{
- return dw_hdmi_unbind(dev, master, data);
+ return dw_hdmi_unbind(dev);
}
static const struct component_ops dw_hdmi_imx_ops = {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 33404295b447..f562cb7964b0 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -357,8 +357,8 @@ static int imx_drm_bind(struct device *dev)
* this value would be used to check framebuffer size limitation
* at drm_mode_addfb().
*/
- drm->mode_config.min_width = 64;
- drm->mode_config.min_height = 64;
+ drm->mode_config.min_width = 1;
+ drm->mode_config.min_height = 1;
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
@@ -389,8 +389,7 @@ static int imx_drm_bind(struct device *dev)
dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
legacyfb_depth = 16;
}
- imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
- drm->mode_config.num_crtc, MAX_CRTC);
+ imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, MAX_CRTC);
if (IS_ERR(imxdrm->fbhelper)) {
ret = PTR_ERR(imxdrm->fbhelper);
imxdrm->fbhelper = NULL;
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 516d06490465..88cd11d30134 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -454,10 +454,8 @@ static int imx_ldb_register(struct drm_device *drm,
DRM_MODE_ENCODER_LVDS, NULL);
if (imx_ldb_ch->bridge) {
- imx_ldb_ch->bridge->encoder = encoder;
-
- imx_ldb_ch->encoder.bridge = imx_ldb_ch->bridge;
- ret = drm_bridge_attach(drm, imx_ldb_ch->bridge);
+ ret = drm_bridge_attach(&imx_ldb_ch->encoder,
+ imx_ldb_ch->bridge, NULL);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
@@ -738,8 +736,6 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
for (i = 0; i < 2; i++) {
struct imx_ldb_channel *channel = &imx_ldb->channel[i];
- if (channel->bridge)
- drm_bridge_detach(channel->bridge);
if (channel->panel)
drm_panel_detach(channel->panel);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 3b602ee33c44..4826bb781723 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -98,6 +98,8 @@
/* TVE_TST_MODE_REG */
#define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0)
+#define IMX_TVE_DAC_VOLTAGE 2750000
+
enum {
TVE_MODE_TVOUT,
TVE_MODE_VGA,
@@ -150,13 +152,11 @@ __releases(&tve->lock)
static void tve_enable(struct imx_tve *tve)
{
- int ret;
-
if (!tve->enabled) {
tve->enabled = true;
clk_prepare_enable(tve->clk);
- ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_EN, TVE_EN);
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+ TVE_EN, TVE_EN);
}
/* clear interrupt status register */
@@ -174,12 +174,9 @@ static void tve_enable(struct imx_tve *tve)
static void tve_disable(struct imx_tve *tve)
{
- int ret;
-
if (tve->enabled) {
tve->enabled = false;
- ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_EN, 0);
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
clk_disable_unprepare(tve->clk);
}
}
@@ -621,9 +618,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
tve->dac_reg = devm_regulator_get(dev, "dac");
if (!IS_ERR(tve->dac_reg)) {
- ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
- if (ret)
- return ret;
+ if (regulator_get_voltage(tve->dac_reg) != IMX_TVE_DAC_VOLTAGE)
+ dev_warn(dev, "dac voltage is not %d uV\n", IMX_TVE_DAC_VOLTAGE);
ret = regulator_enable(tve->dac_reg);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index e74a0ad52950..8b5294d47cee 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -77,7 +77,7 @@ drm_plane_state_to_eba(struct drm_plane_state *state)
BUG_ON(!cma_obj);
return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y +
- drm_format_plane_cpp(fb->pixel_format, 0) * x;
+ fb->format->cpp[0] * x;
}
static inline unsigned long
@@ -92,11 +92,11 @@ drm_plane_state_to_ubo(struct drm_plane_state *state)
cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
BUG_ON(!cma_obj);
- x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
- y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
+ x /= drm_format_horz_chroma_subsampling(fb->format->format);
+ y /= drm_format_vert_chroma_subsampling(fb->format->format);
return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
- drm_format_plane_cpp(fb->pixel_format, 1) * x - eba;
+ fb->format->cpp[1] * x - eba;
}
static inline unsigned long
@@ -111,11 +111,11 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
BUG_ON(!cma_obj);
- x /= drm_format_horz_chroma_subsampling(fb->pixel_format);
- y /= drm_format_vert_chroma_subsampling(fb->pixel_format);
+ x /= drm_format_horz_chroma_subsampling(fb->format->format);
+ y /= drm_format_vert_chroma_subsampling(fb->format->format);
return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
- drm_format_plane_cpp(fb->pixel_format, 2) * x - eba;
+ fb->format->cpp[2] * x - eba;
}
void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -281,7 +281,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
*/
if (old_fb && (state->src_w != old_state->src_w ||
state->src_h != old_state->src_h ||
- fb->pixel_format != old_fb->pixel_format))
+ fb->format != old_fb->format))
crtc_state->mode_changed = true;
eba = drm_plane_state_to_eba(state);
@@ -295,7 +295,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (old_fb && fb->pitches[0] != old_fb->pitches[0])
crtc_state->mode_changed = true;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
@@ -315,7 +315,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (vbo & 0x7 || vbo > 0xfffff8)
return -EINVAL;
- if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
+ if (old_fb && (fb->format == old_fb->format)) {
old_vbo = drm_plane_state_to_vbo(old_state);
if (vbo != old_vbo)
crtc_state->mode_changed = true;
@@ -332,7 +332,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (ubo & 0x7 || ubo > 0xfffff8)
return -EINVAL;
- if (old_fb && (fb->pixel_format == old_fb->pixel_format)) {
+ if (old_fb && (fb->format == old_fb->format)) {
old_ubo = drm_plane_state_to_ubo(old_state);
if (ubo != old_ubo)
crtc_state->mode_changed = true;
@@ -348,8 +348,8 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* The x/y offsets must be even in case of horizontal/vertical
* chroma subsampling.
*/
- hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+ hsub = drm_format_horz_chroma_subsampling(fb->format->format);
+ vsub = drm_format_vert_chroma_subsampling(fb->format->format);
if (((state->src_x >> 16) & (hsub - 1)) ||
((state->src_y >> 16) & (vsub - 1)))
return -EINVAL;
@@ -392,13 +392,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
- ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format);
+ ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x,
state->crtc_y);
/* Enable local alpha on partial plane */
- switch (state->fb->pixel_format) {
+ switch (state->fb->format->format) {
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
@@ -421,11 +421,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_zero(ipu_plane->ipu_ch);
ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16,
state->src_h >> 16);
- ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format);
+ ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->format->format);
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
@@ -434,9 +434,9 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
case DRM_FORMAT_YVU444:
ubo = drm_plane_state_to_ubo(state);
vbo = drm_plane_state_to_vbo(state);
- if (fb->pixel_format == DRM_FORMAT_YVU420 ||
- fb->pixel_format == DRM_FORMAT_YVU422 ||
- fb->pixel_format == DRM_FORMAT_YVU444)
+ if (fb->format->format == DRM_FORMAT_YVU420 ||
+ fb->format->format == DRM_FORMAT_YVU422 ||
+ fb->format->format == DRM_FORMAT_YVU444)
swap(ubo, vbo);
ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 8582a83c0d9b..d5c06fd89f90 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -191,9 +191,7 @@ static int imx_pd_register(struct drm_device *drm,
drm_panel_attach(imxpd->panel, &imxpd->connector);
if (imxpd->bridge) {
- imxpd->bridge->encoder = encoder;
- encoder->bridge = imxpd->bridge;
- ret = drm_bridge_attach(drm, imxpd->bridge);
+ ret = drm_bridge_attach(encoder, imxpd->bridge, NULL);
if (ret < 0) {
dev_err(imxpd->dev, "failed to attach bridge: %d\n",
ret);
@@ -286,8 +284,6 @@ static void imx_pd_unbind(struct device *dev, struct device *master,
{
struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
- if (imxpd->bridge)
- drm_bridge_detach(imxpd->bridge);
if (imxpd->panel)
drm_panel_detach(imxpd->panel);
diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c
new file mode 100644
index 000000000000..7b12a68c3b54
--- /dev/null
+++ b/drivers/gpu/drm/lib/drm_random.c
@@ -0,0 +1,41 @@
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "drm_random.h"
+
+static inline u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
+{
+ return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
+}
+
+void drm_random_reorder(unsigned int *order, unsigned int count,
+ struct rnd_state *state)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < count; ++i) {
+ BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32));
+ j = drm_prandom_u32_max_state(count, state);
+ swap(order[i], order[j]);
+ }
+}
+EXPORT_SYMBOL(drm_random_reorder);
+
+unsigned int *drm_random_order(unsigned int count, struct rnd_state *state)
+{
+ unsigned int *order, i;
+
+ order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+ if (!order)
+ return order;
+
+ for (i = 0; i < count; i++)
+ order[i] = i;
+
+ drm_random_reorder(order, count, state);
+ return order;
+}
+EXPORT_SYMBOL(drm_random_order);
diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h
new file mode 100644
index 000000000000..a78644bea7f9
--- /dev/null
+++ b/drivers/gpu/drm/lib/drm_random.h
@@ -0,0 +1,25 @@
+#ifndef __DRM_RANDOM_H__
+#define __DRM_RANDOM_H__
+
+/* This is a temporary home for a couple of utility functions that should
+ * be moved to lib/ at the earliest convenience.
+ */
+
+#include <linux/random.h>
+
+#define DRM_RND_STATE_INITIALIZER(seed__) ({ \
+ struct rnd_state state__; \
+ prandom_seed_state(&state__, (seed__)); \
+ state__; \
+})
+
+#define DRM_RND_STATE(name__, seed__) \
+ struct rnd_state name__ = DRM_RND_STATE_INITIALIZER(seed__)
+
+unsigned int *drm_random_order(unsigned int count,
+ struct rnd_state *state);
+void drm_random_reorder(unsigned int *order,
+ unsigned int count,
+ struct rnd_state *state);
+
+#endif /* !__DRM_RANDOM_H__ */
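drm_prandom_u32_max_state() above is the usual multiply-high reduction — for a 32-bit random r and bound n, (u64)r * n >> 32 lies in [0, n) — and drm_random_order() layers a Fisher-Yates shuffle on top of it. A hedged usage sketch, selftest-style (function name invented, kernel context assumed):

/* Visit count items in a seeded pseudo-random order. */
static int sketch_walk_randomly(unsigned int count, u64 seed)
{
        DRM_RND_STATE(prng, seed);
        unsigned int *order;
        unsigned int i;

        order = drm_random_order(count, &prng);
        if (!order)
                return -ENOMEM;

        for (i = 0; i < count; i++)
                pr_debug("visiting item %u\n", order[i]);

        kfree(order);
        return 0;
}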
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 90fb831ef031..3bd3bd688d1a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -63,6 +63,7 @@ enum mtk_dpi_out_color_format {
struct mtk_dpi {
struct mtk_ddp_comp ddp_comp;
struct drm_encoder encoder;
+ struct drm_bridge *bridge;
void __iomem *regs;
struct device *dev;
struct clk *engine_clk;
@@ -620,8 +621,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
/* Currently DPI0 is fixed to be driven by OVL1 */
dpi->encoder.possible_crtcs = BIT(1);
- dpi->encoder.bridge->encoder = &dpi->encoder;
- ret = drm_bridge_attach(dpi->encoder.dev, dpi->encoder.bridge);
+ ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL);
if (ret) {
dev_err(dev, "Failed to attach bridge: %d\n", ret);
goto err_cleanup;
@@ -718,9 +718,9 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dev_info(dev, "Found bridge node: %s\n", bridge_node->full_name);
- dpi->encoder.bridge = of_drm_find_bridge(bridge_node);
+ dpi->bridge = of_drm_find_bridge(bridge_node);
of_node_put(bridge_node);
- if (!dpi->encoder.bridge)
+ if (!dpi->bridge)
return -EPROBE_DEFER;
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 01a21dd835b5..a73de1e669c2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -170,8 +170,8 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe)
{
- struct mtk_drm_private *priv = drm->dev_private;
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(priv->crtc[pipe]);
+ struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe);
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
@@ -181,8 +181,8 @@ int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe)
void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe)
{
- struct mtk_drm_private *priv = drm->dev_private;
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(priv->crtc[pipe]);
+ struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe);
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
mtk_ddp_comp_disable_vblank(ovl);
@@ -588,7 +588,6 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
goto unprepare;
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
- priv->crtc[pipe] = &mtk_crtc->base;
priv->num_pipes++;
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 4b7fe7eaec01..b5f88e6d078e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -321,7 +321,8 @@ static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
- drm_put_dev(private->drm);
+ drm_dev_unregister(private->drm);
+ drm_dev_unref(private->drm);
private->drm = NULL;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index aa9389446785..df322a7a5fcb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -32,7 +32,6 @@ struct mtk_drm_private {
struct drm_device *drm;
struct device *dma_dev;
- struct drm_crtc *crtc[MAX_CRTC];
unsigned int num_pipes;
struct device_node *mutex_node;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index 147df85399ab..d4246c9dceae 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -82,7 +82,7 @@ static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
if (!mtk_fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(&mtk_fb->base, mode);
+ drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode);
mtk_fb->gem_obj = obj;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index c461a232cbf5..e405e89ed5e5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -133,9 +133,9 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
pitch = fb->pitches[0];
- format = fb->pixel_format;
+ format = fb->format->format;
- addr += (plane->state->src.x1 >> 16) * drm_format_plane_cpp(format, 0);
+ addr += (plane->state->src.x1 >> 16) * fb->format->cpp[0];
addr += (plane->state->src.y1 >> 16) * pitch;
state->pending.enable = true;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 2c42f90809d8..dd71cbb1a622 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -622,26 +622,6 @@ static const struct drm_connector_helper_funcs
.get_modes = mtk_dsi_connector_get_modes,
};
-static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
- struct drm_encoder *encoder)
-{
- int ret;
-
- if (!bridge)
- return -ENOENT;
-
- encoder->bridge = bridge;
- bridge->encoder = encoder;
- ret = drm_bridge_attach(encoder->dev, bridge);
- if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
- encoder->bridge = NULL;
- bridge->encoder = NULL;
- }
-
- return ret;
-}
-
static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
{
int ret;
@@ -692,8 +672,10 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
dsi->encoder.possible_crtcs = 1;
/* If there's a bridge, attach to it and let it create the connector */
- ret = mtk_drm_attach_bridge(dsi->bridge, &dsi->encoder);
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+
/* Otherwise create our own connector and attach to a panel */
ret = mtk_dsi_create_connector(drm, dsi);
if (ret)
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 0e8c4d9af340..c26251260b83 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -149,6 +149,7 @@ struct hdmi_audio_param {
struct mtk_hdmi {
struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
struct drm_connector conn;
struct device *dev;
struct phy *phy;
@@ -1314,9 +1315,9 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
return ret;
}
- if (bridge->next) {
- bridge->next->encoder = bridge->encoder;
- ret = drm_bridge_attach(bridge->encoder->dev, bridge->next);
+ if (hdmi->next_bridge) {
+ ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ bridge);
if (ret) {
dev_err(hdmi->dev,
"Failed to attach external bridge: %d\n", ret);
@@ -1510,8 +1511,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(ep);
if (!of_device_is_compatible(remote, "hdmi-connector")) {
- hdmi->bridge.next = of_drm_find_bridge(remote);
- if (!hdmi->bridge.next) {
+ hdmi->next_bridge = of_drm_find_bridge(remote);
+ if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index 2591978b8aad..92cf84530f49 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,4 +1,4 @@
-meson-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
-obj-$(CONFIG_DRM_MESON) += meson.o
+obj-$(CONFIG_DRM_MESON) += meson-drm.o
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index ff1f6019b97b..6f2fd82ed483 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -279,7 +279,6 @@ static int meson_drv_probe(struct platform_device *pdev)
drm->mode_config.funcs = &meson_mode_config_funcs;
priv->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_crtc,
drm->mode_config.num_connector);
if (IS_ERR(priv->fbdev)) {
ret = PTR_ERR(priv->fbdev);
@@ -329,8 +328,7 @@ static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
.remove = meson_drv_remove,
.driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
+ .name = "meson-drm",
.of_match_table = dt_match,
},
};
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 7890e30eb584..a32d3b6e2e12 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -116,7 +116,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_XRGB8888:
/* For XRGB, replace the pixel's alpha by 0xFF */
writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 1f2f9ca25901..1ffdafea27e4 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -392,6 +392,24 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
drm_mga_private_t *dev_priv;
int ret;
+ /* There are PCI versions of the G450. These cards have the
+ * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
+ * bridge chip. We detect these cards, which are not currently
+ * supported by this driver, by looking at the device ID of the
+ * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
+ * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
+ * device.
+ */
+ if ((dev->pdev->device == 0x0525) && dev->pdev->bus->self
+ && (dev->pdev->bus->self->vendor == 0x3388)
+ && (dev->pdev->bus->self->device == 0x0021)
+ && dev->agp) {
+ /* FIXME: This should be quirked in the PCI core, but oh well,
+ * the hw probably stopped existing. */
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ dev->agp = NULL;
+ }
dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
if (!dev_priv)
return -ENOMEM;
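
Moving the PCI G450 check into mga_driver_load() (replacing the .device_is_agp hook removed further down) keeps the same detection logic: the card is identified by the Hint Corp HB6 bridge sitting directly above it. The same test can be phrased with the pci_upstream_bridge() accessor; a sketch with a hypothetical helper name:

#include <linux/pci.h>

/* Hypothetical helper: true when pdev is a G450 behind a Hint HB6 bridge. */
static bool mga_is_pci_g450(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev); /* pdev->bus->self */

	return pdev->device == 0x0525 && bridge &&
	       bridge->vendor == 0x3388 && bridge->device == 0x0021;
}
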
@@ -698,7 +716,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
- const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
+ const int is_agp = (dma_bs->agp_mode != 0) && dev->agp;
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -1127,12 +1145,10 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
/**
* Called just before the module is unloaded.
*/
-int mga_driver_unload(struct drm_device *dev)
+void mga_driver_unload(struct drm_device *dev)
{
kfree(dev->dev_private);
dev->dev_private = NULL;
-
- return 0;
}
/**
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 25b2a1a424e6..63ba0699d107 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -37,8 +37,6 @@
#include <drm/drm_pciids.h>
-static int mga_driver_device_is_agp(struct drm_device *dev);
-
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
@@ -66,7 +64,6 @@ static struct drm_driver driver = {
.lastclose = mga_driver_lastclose,
.set_busid = drm_pci_set_busid,
.dma_quiescent = mga_driver_dma_quiescent,
- .device_is_agp = mga_driver_device_is_agp,
.get_vblank_counter = mga_get_vblank_counter,
.enable_vblank = mga_enable_vblank,
.disable_vblank = mga_disable_vblank,
@@ -107,37 +104,3 @@ module_exit(mga_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-
-/**
- * Determine if the device really is AGP or not.
- *
- * In addition to the usual tests performed by \c drm_device_is_agp, this
- * function detects PCI G450 cards that appear to the system exactly like
- * AGP G450 cards.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
- */
-static int mga_driver_device_is_agp(struct drm_device *dev)
-{
- const struct pci_dev *const pdev = dev->pdev;
-
- /* There are PCI versions of the G450. These cards have the
- * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
- * bridge chip. We detect these cards, which are not currently
- * supported by this driver, by looking at the device ID of the
- * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
- * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
- * device.
- */
-
- if ((pdev->device == 0x0525) && pdev->bus->self
- && (pdev->bus->self->vendor == 0x3388)
- && (pdev->bus->self->device == 0x0021)) {
- return 0;
- }
-
- return 2;
-}
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index bb312339e0b0..d5ce829b3199 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -166,7 +166,7 @@ extern int mga_dma_reset(struct drm_device *dev, void *data,
extern int mga_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
-extern int mga_driver_unload(struct drm_device *dev);
+extern void mga_driver_unload(struct drm_device *dev);
extern void mga_driver_lastclose(struct drm_device *dev);
extern int mga_driver_dma_quiescent(struct drm_device *dev);
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 520e5e668d6c..db58578719d2 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,6 +1,6 @@
config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
help
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index b0b874264f9d..9ac007880328 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -36,6 +36,7 @@ static const struct pci_device_id pciidlist[] = {
{ PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
{ PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
+ { PCI_VENDOR_ID_MATROX, 0x538, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH3 },
{0,}
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 3e02ac20777c..c88b6ec88dd2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -15,6 +15,7 @@
#include <video/vga.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -179,6 +180,7 @@ enum mga_type {
G200_WB,
G200_EV,
G200_EH,
+ G200_EH3,
G200_ER,
G200_EW3,
};
@@ -257,7 +259,7 @@ int mgag200_framebuffer_init(struct drm_device *dev,
int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
-int mgag200_driver_unload(struct drm_device *dev);
+void mgag200_driver_unload(struct drm_device *dev);
int mgag200_gem_create(struct drm_device *dev,
u32 size, bool iskernel,
struct drm_gem_object **obj);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 88dd2214114d..a449bb91213a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -24,7 +24,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
struct drm_gem_object *obj;
struct mgag200_bo *bo;
int src_offset, dst_offset;
- int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+ int bpp = mfbdev->mfb.base.format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
@@ -217,7 +217,7 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = mdev->mc.vram_size;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
sizes->fb_height);
@@ -286,7 +286,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
- mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+ MGAG200FB_CONN_LIMIT);
if (ret)
goto err_fb_helper;
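
The dropped argument here mirrors a core change in this cycle: drm_fb_helper_init() no longer takes a CRTC count and sizes its bookkeeping from the device's mode configuration instead, so callers pass only the connector limit. Assuming the 4.11-era prototype:

/* int drm_fb_helper_init(struct drm_device *dev,
 *                        struct drm_fb_helper *helper,
 *                        int max_conn_count); */
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, MGAG200FB_CONN_LIMIT);
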
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 10535e3b75f2..77d1c4771786 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -106,6 +106,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
clock = 2;
break;
case G200_EH:
+ case G200_EH3:
case G200_ER:
data = 2;
clock = 1;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index e79cbc25ae3c..dce8a3eb5a10 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -34,7 +34,7 @@ int mgag200_framebuffer_init(struct drm_device *dev,
{
int ret;
- drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
gfb->obj = obj;
ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
if (ret) {
@@ -145,6 +145,8 @@ static int mga_vram_init(struct mga_device *mdev)
}
mem = pci_iomap(mdev->dev->pdev, 0, 0);
+ if (!mem)
+ return -ENOMEM;
mdev->mc.vram_size = mga_probe_vram(mdev, mem);
@@ -262,18 +264,17 @@ err_mm:
return r;
}
-int mgag200_driver_unload(struct drm_device *dev)
+void mgag200_driver_unload(struct drm_device *dev)
{
struct mga_device *mdev = dev->dev_private;
if (mdev == NULL)
- return 0;
+ return;
mgag200_modeset_fini(mdev);
mgag200_fbdev_fini(mdev);
drm_mode_config_cleanup(dev);
mgag200_mm_fini(mdev);
dev->dev_private = NULL;
- return 0;
}
int mgag200_gem_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 3a03ac4045d8..3938120e5051 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -38,11 +38,11 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
WREG8(DAC_INDEX + MGA1064_INDEX, 0);
- if (fb && fb->bits_per_pixel == 16) {
- int inc = (fb->depth == 15) ? 8 : 4;
+ if (fb && fb->format->cpp[0] * 8 == 16) {
+ int inc = (fb->format->depth == 15) ? 8 : 4;
u8 r, b;
for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
- if (fb->depth == 16) {
+ if (fb->format->depth == 16) {
if (i > (MGAG200_LUT_SIZE >> 1)) {
r = b = 0;
} else {
@@ -497,34 +497,70 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
bool pll_locked = false;
m = n = p = 0;
- vcomax = 800000;
- vcomin = 400000;
- pllreffreq = 33333;
- delta = 0xffffffff;
+ if (mdev->type == G200_EH3) {
+ vcomax = 3000000;
+ vcomin = 1500000;
+ pllreffreq = 25000;
- for (testp = 16; testp > 0; testp >>= 1) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
+ delta = 0xffffffff;
- for (testm = 1; testm < 33; testm++) {
- for (testn = 17; testn < 257; testn++) {
- computed = (pllreffreq * testn) /
- (testm * testp);
+ testp = 0;
+
+ for (testm = 150; testm >= 6; testm--) {
+ if (clock * testm > vcomax)
+ continue;
+ if (clock * testm < vcomin)
+ continue;
+ for (testn = 120; testn >= 60; testn--) {
+ computed = (pllreffreq * testn) / testm;
if (computed > clock)
tmpdelta = computed - clock;
else
tmpdelta = clock - computed;
if (tmpdelta < delta) {
delta = tmpdelta;
- n = testn - 1;
- m = (testm - 1);
- p = testp - 1;
+ n = testn;
+ m = testm;
+ p = testp;
+ }
+ if (delta == 0)
+ break;
+ }
+ if (delta == 0)
+ break;
+ }
+ } else {
+
+ vcomax = 800000;
+ vcomin = 400000;
+ pllreffreq = 33333;
+
+ delta = 0xffffffff;
+
+ for (testp = 16; testp > 0; testp >>= 1) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 33; testm++) {
+ for (testn = 17; testn < 257; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1);
+ p = testp - 1;
+ }
+ if ((clock * testp) >= 600000)
+ p |= 0x80;
}
- if ((clock * testp) >= 600000)
- p |= 0x80;
}
}
}
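
The G200EH3 branch above searches only the feedback dividers: with a 25 MHz reference, the output is ref x n / m and the VCO runs at ref x n (which is approximately clock x m, hence the 1.5-3.0 GHz bound on clock x testm, all in kHz), so the loops minimize |25000 x n / m - clock|. A standalone userspace sketch of the same search, with a hypothetical function name, for experimenting with target clocks:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Re-statement of the G200EH3 divider search above (values in kHz). */
static void find_divs(long clock)
{
	const long ref = 25000, vcomin = 1500000, vcomax = 3000000;
	long delta = LONG_MAX;
	int m = 0, n = 0;

	for (int testm = 150; testm >= 6; testm--) {
		if (clock * testm > vcomax || clock * testm < vcomin)
			continue;
		for (int testn = 120; testn >= 60; testn--) {
			long computed = ref * testn / testm;
			long d = labs(computed - clock);

			if (d < delta) {
				delta = d;
				m = testm;
				n = testn;
			}
		}
	}
	printf("clock %ld kHz -> n=%d m=%d (off by %ld kHz)\n",
	       clock, n, m, delta);
}

int main(void)
{
	find_divs(148500); /* 1080p60 pixel clock: yields n=101, m=17 */
	return 0;
}
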
@@ -674,6 +710,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
return mga_g200ev_set_plls(mdev, clock);
break;
case G200_EH:
+ case G200_EH3:
return mga_g200eh_set_plls(mdev, clock);
break;
case G200_ER:
@@ -880,6 +917,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
+ const struct drm_framebuffer *fb = crtc->primary->fb;
int hdisplay, hsyncstart, hsyncend, htotal;
int vdisplay, vsyncstart, vsyncend, vtotal;
int pitch;
@@ -902,7 +940,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
/* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
};
- bppshift = mdev->bpp_shifts[(crtc->primary->fb->bits_per_pixel >> 3) - 1];
+ bppshift = mdev->bpp_shifts[fb->format->cpp[0] - 1];
switch (mdev->type) {
case G200_SE_A:
@@ -932,6 +970,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
option2 = 0x0000b000;
break;
case G200_EH:
+ case G200_EH3:
dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
MGA1064_MISC_CTL_DAC_RAM_CS;
option = 0x00000120;
@@ -941,12 +980,12 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
break;
}
- switch (crtc->primary->fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
break;
case 16:
- if (crtc->primary->fb->depth == 15)
+ if (fb->format->depth == 15)
dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
else
dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
@@ -978,7 +1017,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if ((mdev->type == G200_EV ||
mdev->type == G200_WB ||
mdev->type == G200_EH ||
- mdev->type == G200_EW3) &&
+ mdev->type == G200_EW3 ||
+ mdev->type == G200_EH3) &&
(i >= 0x44) && (i <= 0x4e))
continue;
@@ -997,8 +1037,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
WREG_SEQ(3, 0);
WREG_SEQ(4, 0xe);
- pitch = crtc->primary->fb->pitches[0] / (crtc->primary->fb->bits_per_pixel / 8);
- if (crtc->primary->fb->bits_per_pixel == 24)
+ pitch = fb->pitches[0] / fb->format->cpp[0];
+ if (fb->format->cpp[0] * 8 == 24)
pitch = (pitch * 3) >> (4 - bppshift);
else
pitch = pitch >> (4 - bppshift);
@@ -1075,7 +1115,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
((vdisplay & 0xc00) >> 7) |
((vsyncstart & 0xc00) >> 5) |
((vdisplay & 0x400) >> 3);
- if (crtc->primary->fb->bits_per_pixel == 24)
+ if (fb->format->cpp[0] * 8 == 24)
ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
else
ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
@@ -1138,9 +1178,9 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
u32 bpp;
u32 mb;
- if (crtc->primary->fb->bits_per_pixel > 16)
+ if (fb->format->cpp[0] * 8 > 16)
bpp = 32;
- else if (crtc->primary->fb->bits_per_pixel > 8)
+ else if (fb->format->cpp[0] * 8 > 8)
bpp = 16;
else
bpp = 8;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 5e20220ef4c6..657598bb1e6b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -236,8 +236,6 @@ struct ttm_bo_driver mgag200_bo_driver = {
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int mgag200_mm_init(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index d96b2b6898a3..5b8e23d051f2 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -4,6 +4,7 @@ config DRM_MSM
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
+ depends on MMU
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
@@ -71,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY
help
Choose this option if the 28nm DSI PHY 8960 variant is used on the
platform.
+
+config DRM_MSM_DSI_14NM_PHY
+ bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the DSI 14nm PHY on MSM8996/APQ8096 is used on the platform.

diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 028c24df2291..39055362da95 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b8647198c11c..4414cf73735d 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -12,6 +12,7 @@
*/
#include "msm_gem.h"
+#include "msm_mmu.h"
#include "a5xx_gpu.h"
extern bool hang_debug;
@@ -327,7 +328,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* Enable RBBM error reporting bits */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
- if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
/*
* Mask out the activity signals from RB1-3 to avoid false
* positives
@@ -381,7 +382,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
- if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
@@ -573,6 +574,19 @@ static bool a5xx_idle(struct msm_gpu *gpu)
return true;
}
+static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
+{
+ struct msm_gpu *gpu = arg;
+ pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+ iova, flags,
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
+
+ return -EFAULT;
+}
+
static void a5xx_cp_err_irq(struct msm_gpu *gpu)
{
u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
@@ -884,5 +898,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 893eb2b2531b..ece39b16a864 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -75,12 +75,14 @@ static const struct adreno_info gpulist[] = {
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
}, {
- .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+ .rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
.pm4fw = "a530_pm4.fw",
.pfpfw = "a530_pfp.fw",
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+ ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
.gpmufw = "a530v3_gpmu.fw2",
},
@@ -181,22 +183,51 @@ static void set_gpu_pdev(struct drm_device *dev,
priv->gpu_pdev = pdev;
}
-static const struct {
- const char *str;
- uint32_t flag;
-} quirks[] = {
- { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
- { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
-};
+static int find_chipid(struct device *dev, u32 *chipid)
+{
+ struct device_node *node = dev->of_node;
+ const char *compat;
+ int ret;
+
+ /* first search the compat strings for qcom,adreno-XYZ.W: */
+ ret = of_property_read_string_index(node, "compatible", 0, &compat);
+ if (ret == 0) {
+ unsigned rev, patch;
+
+ if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) {
+ *chipid = 0;
+ *chipid |= (rev / 100) << 24; /* core */
+ rev %= 100;
+ *chipid |= (rev / 10) << 16; /* major */
+ rev %= 10;
+ *chipid |= rev << 8; /* minor */
+ *chipid |= patch;
+
+ return 0;
+ }
+ }
+
+ /* and if that fails, fall back to legacy "qcom,chipid" property: */
+ ret = of_property_read_u32(node, "qcom,chipid", chipid);
+ if (ret)
+ return ret;
+
+ dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+ dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
+ (*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff,
+ (*chipid >> 8) & 0xff, *chipid & 0xff);
+
+ return 0;
+}
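
find_chipid() packs the decimal "XYZ.W" from the compatible string into the same byte layout the legacy qcom,chipid property used: core in bits 31:24, major in 23:16, minor in 15:8, patch in 7:0. A worked example (hypothetical standalone check):

/* "qcom,adreno-530.2": rev = 530, patch = 2
 *   core  = 530 / 100        = 5
 *   major = (530 % 100) / 10 = 3
 *   minor = 530 % 10         = 0
 * => chipid = (5 << 24) | (3 << 16) | (0 << 8) | 2 = 0x05030002
 */
u32 chipid = (5 << 24) | (3 << 16) | (0 << 8) | 2;
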
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
struct device_node *child, *node = dev->of_node;
u32 val;
- int ret, i;
+ int ret;
- ret = of_property_read_u32(node, "qcom,chipid", &val);
+ ret = find_chipid(dev, &val);
if (ret) {
dev_err(dev, "could not find chipid: %d\n", ret);
return ret;
@@ -224,14 +255,12 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
}
if (!config.fast_rate) {
- dev_err(dev, "could not find clk rates\n");
- return -ENXIO;
+ dev_warn(dev, "could not find clk rates\n");
+ /* This is a safe low speed for all devices: */
+ config.fast_rate = 200000000;
+ config.slow_rate = 27000000;
}
- for (i = 0; i < ARRAY_SIZE(quirks); i++)
- if (of_property_read_bool(node, quirks[i].str))
- config.quirks |= quirks[i].flag;
-
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
@@ -260,6 +289,7 @@ static int adreno_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
/* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 686a580c711a..c9bd1e6225f4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -352,7 +352,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->gmem = adreno_gpu->info->gmem;
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
- adreno_gpu->quirks = config->quirks;
gpu->fast_rate = config->fast_rate;
gpu->slow_rate = config->slow_rate;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e8d55b0306ed..42e444a67630 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -75,6 +75,7 @@ struct adreno_info {
const char *pm4fw, *pfpfw;
const char *gpmufw;
uint32_t gmem;
+ enum adreno_quirks quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
};
@@ -116,8 +117,6 @@ struct adreno_gpu {
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
-
- uint32_t quirks;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -128,7 +127,6 @@ struct adreno_platform_config {
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
- uint32_t quirks;
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index ec572f8389ed..311c1c1e7d6c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -18,9 +18,7 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
return NULL;
- return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
- msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
- msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+ return msm_dsi->encoder;
}
static int dsi_get_phy(struct msm_dsi *msm_dsi)
@@ -187,14 +185,13 @@ void __exit msm_dsi_unregister(void)
}
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
- struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+ struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_bridge *ext_bridge;
- int ret, i;
+ int ret;
- if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
- !encoders[MSM_DSI_CMD_ENCODER_ID]))
+ if (WARN_ON(!encoder))
return -EINVAL;
msm_dsi->dev = dev;
@@ -205,6 +202,8 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
+ msm_dsi->encoder = encoder;
+
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
if (IS_ERR(msm_dsi->bridge)) {
ret = PTR_ERR(msm_dsi->bridge);
@@ -213,11 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
- encoders[i]->bridge = msm_dsi->bridge;
- msm_dsi->encoders[i] = encoders[i];
- }
-
/*
* check if the dsi encoder output is connected to a panel or an
* external bridge. We create a connector only if we're connected to a
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 03f115f532c2..32369975d155 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -27,14 +27,24 @@
#define DSI_1 1
#define DSI_MAX 2
+struct msm_dsi_phy_shared_timings;
+struct msm_dsi_phy_clk_request;
+
enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_HPM,
MSM_DSI_PHY_28NM_LP,
MSM_DSI_PHY_20NM,
MSM_DSI_PHY_28NM_8960,
+ MSM_DSI_PHY_14NM,
MSM_DSI_PHY_MAX
};
+enum msm_dsi_phy_usecase {
+ MSM_DSI_PHY_STANDALONE,
+ MSM_DSI_PHY_MASTER,
+ MSM_DSI_PHY_SLAVE,
+};
+
#define DSI_DEV_REGULATOR_MAX 8
#define DSI_BUS_CLK_MAX 4
@@ -73,8 +83,8 @@ struct msm_dsi {
struct device *phy_dev;
bool phy_enabled;
- /* the encoders we are hooked to (outside of dsi block) */
- struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
+ /* the encoder we are hooked to (outside of dsi block) */
+ struct drm_encoder *encoder;
int id;
};
@@ -84,12 +94,9 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
struct drm_connector *msm_dsi_manager_connector_init(u8 id);
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
-int msm_dsi_manager_phy_enable(int id,
- const unsigned long bit_rate, const unsigned long esc_rate,
- u32 *clk_pre, u32 *clk_post);
-void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
+void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
@@ -111,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc);
#else
static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id) {
@@ -131,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
{
return 0;
}
+static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc)
+{
+ return -ENODEV;
+}
#endif
/* dsi host */
@@ -146,7 +160,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
u32 dma_base, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
-int msm_dsi_host_power_on(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+ struct msm_dsi_phy_shared_timings *phy_shared_timings);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode);
@@ -157,6 +172,9 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll);
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+ struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
@@ -164,14 +182,27 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
/* dsi phy */
struct msm_dsi_phy;
+struct msm_dsi_phy_shared_timings {
+ u32 clk_post;
+ u32 clk_pre;
+ bool clk_pre_inc_by_2;
+};
+
+struct msm_dsi_phy_clk_request {
+ unsigned long bitclk_rate;
+ unsigned long escclk_rate;
+};
+
void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void);
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
- const unsigned long bit_rate, const unsigned long esc_rate);
+ struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
-void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
- u32 *clk_pre, u32 *clk_post);
+void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_shared_timings *shared_timing);
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+ enum msm_dsi_phy_usecase uc);
#endif /* __DSI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 39dff7d5e89b..b3d70ea42891 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2015 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-01-11 05:19:19)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+
+Copyright (C) 2013-2017 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -1304,5 +1295,257 @@ static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
+}
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
+}
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014
+#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018
+#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020
+
+#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044
+
+#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048
+#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
+}
+
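
As throughout this generated header, each register field comes as a MASK/SHIFT pair plus an inline packing helper, and fields of one register are combined by OR-ing the helpers' results. For instance, using the LDO control field defined just above:

/* Illustrative value only: place 0x1f into the low 6-bit VREG_CTRL field */
u32 v = DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(0x1f);
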
+static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6
+static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
+
+#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000
+
+#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004
+
+#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010
+
+#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c
+
+#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c
+
+#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088
+
+#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c
+
+#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090
+
+#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc
+
+#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4
+
+#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8
+
+#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104
+
+#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
+
#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 63436d8ee470..a5d75c9b3a73 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -94,6 +94,30 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
.num_dsi = 2,
};
+/*
+ * TODO: core_mmss_clk fails to enable for some reason, but things work fine
+ * without it too. Figure out why it doesn't enable and uncomment it below.
+ */
+static const char * const dsi_8996_bus_clk_names[] = {
+ "mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */
+};
+
+static const struct msm_dsi_config msm8996_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 2,
+ .regs = {
+ {"vdda", 18160, 1 }, /* 1.25 V */
+ {"vcca", 17000, 32 }, /* 0.925 V */
+ {"vddio", 100000, 100 },/* 1.8 V */
+ },
+ },
+ .bus_clk_names = dsi_8996_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
+ .io_start = { 0x994000, 0x996000 },
+ .num_dsi = 2,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
@@ -106,6 +130,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8974_apq8084_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index eeacc3232494..00a5da2663c6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -24,6 +24,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
+#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 3819fdefcae2..1fc07ce24686 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -691,17 +691,6 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
return 0;
}
-static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
-{
- DBG("");
- dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
- /* Make sure fully reset */
- wmb();
- udelay(1000);
- dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
- udelay(100);
-}
-
static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
{
u32 intr;
@@ -756,7 +745,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
}
static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
- u32 clk_pre, u32 clk_post)
+ struct msm_dsi_phy_shared_timings *phy_shared_timings)
{
u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
@@ -819,10 +808,16 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
- data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
- DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
+ data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
+ DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
+ if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+ (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
+ phy_shared_timings->clk_pre_inc_by_2)
+ dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
+ DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
+
data = 0;
if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
@@ -1482,6 +1477,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
+ msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);
+
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret)
@@ -1557,8 +1554,9 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
- dev_dbg(dev, "failed to find data lane mapping\n");
- return -EINVAL;
+ dev_dbg(dev,
+ "failed to find data lane mapping, using default\n");
+ return 0;
}
num_lanes = len / sizeof(u32);
@@ -1615,7 +1613,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
struct device *dev = &msm_host->pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *endpoint, *device_node;
- int ret;
+ int ret = 0;
/*
* Get the endpoint of the output port of the DSI host. In our case,
@@ -1639,8 +1637,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_port_parent(endpoint);
if (!device_node) {
- dev_err(dev, "%s: no valid device\n", __func__);
- ret = -ENODEV;
+ dev_dbg(dev, "%s: no valid device\n", __func__);
goto err;
}
@@ -2118,6 +2115,28 @@ exit:
return ret;
}
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+ dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+ /* Make sure fully reset */
+ wmb();
+ udelay(1000);
+ dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+ udelay(100);
+}
+
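
msm_dsi_host_reset_phy() is the old dsi_phy_sw_reset() moved behind a host-level API so the DSI manager can reset both PHYs before any PLL clock operation. The body is the usual write-barrier-delay reset pulse; a generic sketch of the pattern with hypothetical register names:

writel(RESET_BIT, base + PHY_RESET_REG); /* assert reset */
wmb();          /* post the write before timing the pulse */
udelay(1000);   /* hold reset for ~1 ms */
writel(0, base + PHY_RESET_REG);         /* deassert */
udelay(100);    /* let the PHY settle */
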
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
+ clk_req->escclk_rate = msm_host->esc_clk_rate;
+}
+
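
The clock request hands the PHY the per-lane bit clock, which is simply the byte clock times eight (one byte per lane per byte-clock cycle). For example, under an assumed 112.5 MHz byte clock:

/* byte_clk_rate = 112500000 (112.5 MHz)
 * => bitclk_rate = 112500000 * 8 = 900000000 (900 Mbit/s per lane) */
unsigned long bitclk = 112500000UL * 8;
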
int msm_dsi_host_enable(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -2165,10 +2184,10 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
SFPB_GPREG_MASTER_PORT_EN(en));
}
-int msm_dsi_host_power_on(struct mipi_dsi_host *host)
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+ struct msm_dsi_phy_shared_timings *phy_shared_timings)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- u32 clk_pre = 0, clk_post = 0;
int ret = 0;
mutex_lock(&msm_host->dev_mutex);
@@ -2179,12 +2198,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
msm_dsi_sfpb_config(msm_host, true);
- ret = dsi_calc_clk_rate(msm_host);
- if (ret) {
- pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
- goto unlock_ret;
- }
-
ret = dsi_host_regulator_enable(msm_host);
if (ret) {
pr_err("%s:Failed to enable vregs.ret=%d\n",
@@ -2192,23 +2205,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
goto unlock_ret;
}
- ret = dsi_bus_clk_enable(msm_host);
- if (ret) {
- pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
- goto fail_disable_reg;
- }
-
- dsi_phy_sw_reset(msm_host);
- ret = msm_dsi_manager_phy_enable(msm_host->id,
- msm_host->byte_clk_rate * 8,
- msm_host->esc_clk_rate,
- &clk_pre, &clk_post);
- dsi_bus_clk_disable(msm_host);
- if (ret) {
- pr_err("%s: failed to enable phy, %d\n", __func__, ret);
- goto fail_disable_reg;
- }
-
ret = dsi_clk_ctrl(msm_host, 1);
if (ret) {
pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
@@ -2224,7 +2220,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
dsi_timing_setup(msm_host);
dsi_sw_reset(msm_host);
- dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
+ dsi_ctrl_config(msm_host, true, phy_shared_timings);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 1);
@@ -2253,15 +2249,13 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
goto unlock_ret;
}
- dsi_ctrl_config(msm_host, false, 0, 0);
+ dsi_ctrl_config(msm_host, false, NULL);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 0);
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- msm_dsi_manager_phy_disable(msm_host->id);
-
dsi_clk_ctrl(msm_host, 0);
dsi_host_regulator_disable(msm_host);
@@ -2281,6 +2275,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
if (msm_host->mode) {
drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,6 +2288,12 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return -ENOMEM;
}
+ ret = dsi_calc_clk_rate(msm_host);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index c8d1f19c9a6d..921270ea6059 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -72,11 +72,12 @@ static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
return 0;
}
-static int dsi_mgr_host_register(int id)
+static int dsi_mgr_setup_components(int id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
struct msm_dsi_pll *src_pll;
int ret;
@@ -85,15 +86,16 @@ static int dsi_mgr_host_register(int id)
if (ret)
return ret;
+ msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
} else if (!other_dsi) {
ret = 0;
} else {
- struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
- msm_dsi : other_dsi;
- struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
- other_dsi : msm_dsi;
+ struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
+ msm_dsi : other_dsi;
+ struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
+ other_dsi : msm_dsi;
/* Register slave host first, so that the slave DSI device
* has a chance to probe, and does not block the master
* DSI device's probe.
@@ -101,14 +103,18 @@ static int dsi_mgr_host_register(int id)
* because only master DSI device adds the panel to global
* panel list. The panel's device is the master DSI device.
*/
- ret = msm_dsi_host_register(sdsi->host, false);
+ ret = msm_dsi_host_register(slave_link_dsi->host, false);
if (ret)
return ret;
- ret = msm_dsi_host_register(mdsi->host, true);
+ ret = msm_dsi_host_register(master_link_dsi->host, true);
if (ret)
return ret;
/* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */
+ msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+ MSM_DSI_PHY_MASTER);
+ msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+ MSM_DSI_PHY_SLAVE);
src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
if (ret)
@@ -119,6 +125,84 @@ static int dsi_mgr_host_register(int id)
return ret;
}
+static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
+ struct msm_dsi_phy_shared_timings *shared_timings)
+{
+ struct msm_dsi_phy_clk_request clk_req;
+ int ret;
+
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
+
+ ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
+ msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
+
+ return ret;
+}
+
+static int
+dsi_mgr_phy_enable(int id,
+ struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX])
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+ int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
+ int ret;
+
+ /* In case of dual DSI, some registers in PHY1 have been programmed
+ * during PLL0 clock's set_rate. The PHY1 reset called by host1 here
+ * will silently reset those PHY1 registers. Therefore we need to reset
+ * and enable both PHYs before any PLL clock operation.
+ */
+ if (IS_DUAL_DSI() && mdsi && sdsi) {
+ if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+ msm_dsi_host_reset_phy(mdsi->host);
+ msm_dsi_host_reset_phy(sdsi->host);
+
+ ret = enable_phy(mdsi, src_pll_id,
+ &shared_timings[DSI_CLOCK_MASTER]);
+ if (ret)
+ return ret;
+ ret = enable_phy(sdsi, src_pll_id,
+ &shared_timings[DSI_CLOCK_SLAVE]);
+ if (ret) {
+ msm_dsi_phy_disable(mdsi->phy);
+ return ret;
+ }
+ }
+ } else {
+ msm_dsi_host_reset_phy(mdsi->host);
+ ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
+ if (ret)
+ return ret;
+ }
+
+ msm_dsi->phy_enabled = true;
+
+ return 0;
+}
+
+static void dsi_mgr_phy_disable(int id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+
+ /* disable DSI phy
+ * In dual-dsi configuration, the phy should be disabled for the
+ * first controller only when the second controller is disabled.
+ */
+ msm_dsi->phy_enabled = false;
+ if (IS_DUAL_DSI() && mdsi && sdsi) {
+ if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+ msm_dsi_phy_disable(sdsi->phy);
+ msm_dsi_phy_disable(mdsi->phy);
+ }
+ } else {
+ msm_dsi_phy_disable(msm_dsi->phy);
+ }
+}
+
struct dsi_connector {
struct drm_connector base;
int id;
@@ -168,6 +252,16 @@ static enum drm_connector_status dsi_mgr_connector_detect(
msm_dsi->panel = msm_dsi_host_get_panel(
other_dsi->host, NULL);
+
+ if (msm_dsi->panel && kms->funcs->set_encoder_mode) {
+ bool cmd_mode = !(msm_dsi->device_flags &
+ MIPI_DSI_MODE_VIDEO);
+ struct drm_encoder *encoder =
+ msm_dsi_get_encoder(msm_dsi);
+
+ kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
+ }
+
if (msm_dsi->panel && IS_DUAL_DSI())
drm_object_attach_property(&connector->base,
connector->dev->mode_config.tile_property, 0);
@@ -344,22 +438,31 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
+ struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
bool is_dual_dsi = IS_DUAL_DSI();
int ret;
DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi) ||
- (is_dual_dsi && (DSI_1 == id)))
+ if (!msm_dsi_device_connected(msm_dsi))
return;
- ret = msm_dsi_host_power_on(host);
+ ret = dsi_mgr_phy_enable(id, phy_shared_timings);
+ if (ret)
+ goto phy_en_fail;
+
+ /* Do nothing with the host if it is DSI 1 in case of dual DSI */
+ if (is_dual_dsi && (DSI_1 == id))
+ return;
+
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
}
if (is_dual_dsi && msm_dsi1) {
- ret = msm_dsi_host_power_on(msm_dsi1->host);
+ ret = msm_dsi_host_power_on(msm_dsi1->host,
+ &phy_shared_timings[DSI_1]);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
@@ -418,6 +521,8 @@ panel_prep_fail:
host1_on_fail:
msm_dsi_host_power_off(host);
host_on_fail:
+ dsi_mgr_phy_disable(id);
+phy_en_fail:
return;
}
@@ -443,10 +548,17 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi) ||
- (is_dual_dsi && (DSI_1 == id)))
+ if (!msm_dsi_device_connected(msm_dsi))
return;
+ /*
+ * Do nothing with the host if it is DSI 1 in case of dual DSI.
+ * It is safe to call dsi_mgr_phy_disable() here because a single PHY
+ * won't be disabled until both PHYs request disable.
+ */
+ if (is_dual_dsi && (DSI_1 == id))
+ goto disable_phy;
+
if (panel) {
ret = drm_panel_disable(panel);
if (ret)
@@ -481,6 +593,9 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
pr_err("%s: host1 power off failed, %d\n",
__func__, ret);
}
+
+disable_phy:
+ dsi_mgr_phy_disable(id);
}
static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
@@ -540,7 +655,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_connector *connector = NULL;
struct dsi_connector *dsi_connector;
- int ret, i;
+ int ret;
dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
if (!dsi_connector)
@@ -566,9 +681,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
- drm_mode_connector_attach_encoder(connector,
- msm_dsi->encoders[i]);
+ drm_mode_connector_attach_encoder(connector, msm_dsi->encoder);
return connector;
}
@@ -579,6 +692,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_bridge *bridge = NULL;
struct dsi_bridge *dsi_bridge;
+ struct drm_encoder *encoder;
int ret;
dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
@@ -590,10 +704,12 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
dsi_bridge->id = id;
+ encoder = msm_dsi->encoder;
+
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
- ret = drm_bridge_attach(msm_dsi->dev, bridge);
+ ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret)
goto fail;
@@ -619,20 +735,10 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
ext_bridge = msm_dsi->external_bridge =
msm_dsi_host_get_bridge(msm_dsi->host);
- /*
- * HACK: we may not know the external DSI bridge device's mode
- * flags here. We'll get to know them only when the device
- * attaches to the dsi host. For now, assume the bridge supports
- * DSI video mode
- */
- encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
+ encoder = msm_dsi->encoder;
/* link the internal dsi bridge to the external bridge */
- int_bridge->next = ext_bridge;
- /* set the external bridge's encoder as dsi's encoder */
- ext_bridge->encoder = encoder;
-
- drm_bridge_attach(dev, ext_bridge);
+ drm_bridge_attach(encoder, ext_bridge, int_bridge);
/*
* we need the drm_connector created by the external bridge
@@ -657,68 +763,6 @@ void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
{
}
-int msm_dsi_manager_phy_enable(int id,
- const unsigned long bit_rate, const unsigned long esc_rate,
- u32 *clk_pre, u32 *clk_post)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct msm_dsi_phy *phy = msm_dsi->phy;
- int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
- struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
- int ret;
-
- ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
- if (ret)
- return ret;
-
- /*
- * Reset DSI PHY silently changes its PLL registers to reset status,
- * which will confuse clock driver and result in wrong output rate of
- * link clocks. Restore PLL status if its PLL is being used as clock
- * source.
- */
- if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
- ret = msm_dsi_pll_restore_state(pll);
- if (ret) {
- pr_err("%s: failed to restore pll state\n", __func__);
- msm_dsi_phy_disable(phy);
- return ret;
- }
- }
-
- msm_dsi->phy_enabled = true;
- msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
-
- return 0;
-}
-
-void msm_dsi_manager_phy_disable(int id)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
- struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
- struct msm_dsi_phy *phy = msm_dsi->phy;
- struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
-
- /* Save PLL status if it is a clock source */
- if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
- msm_dsi_pll_save_state(pll);
-
- /* disable DSI phy
- * In dual-dsi configuration, the phy should be disabled for the
- * first controller only when the second controller is disabled.
- */
- msm_dsi->phy_enabled = false;
- if (IS_DUAL_DSI() && mdsi && sdsi) {
- if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
- msm_dsi_phy_disable(sdsi->phy);
- msm_dsi_phy_disable(mdsi->phy);
- }
- } else {
- msm_dsi_phy_disable(phy);
- }
-}
-
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -782,6 +826,33 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
return true;
}
+void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_device *dev = msm_dsi->dev;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ struct drm_encoder *encoder;
+
+ /*
+ * drm_device pointer is assigned to msm_dsi only in the modeset_init
+ * path. If mipi_dsi_attach() happens in DSI driver's probe path
+ * (generally the case when we're connected to a drm_panel of the type
+ * mipi_dsi_device), this would be NULL. In such cases, try to set the
+ * encoder mode in the DSI connector's detect() op.
+ */
+ if (!dev)
+ return;
+
+ priv = dev->dev_private;
+ kms = priv->kms;
+ encoder = msm_dsi_get_encoder(msm_dsi);
+
+ if (encoder && kms->funcs->set_encoder_mode)
+ if (!(device_flags & MIPI_DSI_MODE_VIDEO))
+ kms->funcs->set_encoder_mode(kms, encoder, true);
+}
+
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
@@ -806,7 +877,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
goto fail;
}
- ret = dsi_mgr_host_register(id);
+ ret = dsi_mgr_setup_components(id);
if (ret) {
pr_err("%s: failed to register mipi dsi host for DSI %d\n",
__func__, id);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index f39386ed75e4..0c2eb9c9a1fc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -54,8 +54,10 @@ static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
}
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
- const unsigned long bit_rate, const unsigned long esc_rate)
+ struct msm_dsi_phy_clk_request *clk_req)
{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, lpx;
s32 tmax, tmin;
s32 pcnt0 = 10;
@@ -115,8 +117,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
temp = 60 * coeff + 52 * ui - 24 * ui - temp;
tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
- timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
-
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
+ false);
tmax = 63;
temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
@@ -124,17 +126,21 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
if (tmin > tmax) {
temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
- timing->clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = true;
} else {
- timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = false;
}
timing->ta_go = 3;
timing->ta_sure = 0;
timing->ta_get = 4;
- DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
- timing->clk_pre, timing->clk_post, timing->clk_zero,
+ DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
timing->clk_trail, timing->clk_prepare, timing->hs_exit,
timing->hs_zero, timing->hs_prepare, timing->hs_trail,
timing->hs_rqst);
@@ -142,6 +148,123 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
return 0;
}
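
linear_inter() itself is outside this hunk; assuming it picks a value a given percentage of the way from tmin toward tmax (which is how the calls above read), a plausible standalone equivalent would be:

	#include <stdio.h>

	/* Hypothetical stand-in for the driver's linear_inter(): choose a
	 * point `percent` of the way from tmin toward tmax, clamped into
	 * [tmin, tmax].
	 */
	static int linear_inter_sketch(int tmax, int tmin, int percent)
	{
		int v = tmin + (tmax - tmin) * percent / 100;

		if (v < tmin)
			v = tmin;
		if (v > tmax)
			v = tmax;
		return v;
	}

	int main(void)
	{
		/* e.g. clk_post above: tmax = 63, some tmin, pcnt2 = 10 */
		printf("%d\n", linear_inter_sketch(63, 20, 10)); /* 24 */
		return 0;
	}
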
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8, lpx;
+ s32 tmax, tmin;
+ s32 pcnt0 = 50;
+ s32 pcnt1 = 50;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = 30;
+ s32 pcnt4 = 10;
+ s32 pcnt5 = 2;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en, hb_en_ckln, pd_ckln, pd;
+ s32 val, val_ckln;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ timing->hs_halfbyte_en = 0;
+ hb_en = 0;
+ timing->hs_halfbyte_en_ckln = 0;
+ hb_en_ckln = 0;
+ timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
+ pd_ckln = timing->hs_prep_dly_ckln;
+ timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
+ pd = timing->hs_prep_dly;
+
+ val = (hb_en << 2) + (pd << 1);
+ val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+ lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+ temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff - val_ckln * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+ temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
+ tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+ temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
+ tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+ timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+ temp = 60 * coeff + 52 * ui - 43 * ui;
+ tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ timing->shared_timings.clk_post =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
+ temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
+ temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+ (((timing->hs_rqst_ckln << 3) + 8) * ui);
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ if (tmin > tmax) {
+ temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = 1;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+ timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+ timing->hs_prep_dly_ckln);
+
+ return 0;
+}
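
Both calculators work in a scaled unit interval: with coeff = 1000, the expression mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000) comes out as 10^12 / bit_rate, i.e. the UI in picoseconds. A hedged worked example of that arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t coeff = 1000;		/* precision factor, as above */
		uint64_t bit_rate = 1000000000ULL;	/* 1 Gbit/s per lane */

		/* ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000)
		 *    = (10^6 * 1000) / (bit_rate / 1000) = 10^12 / bit_rate
		 */
		uint64_t ui = 1000000ULL * coeff / (bit_rate / 1000);

		printf("UI = %llu ps, 8*UI = %llu ps\n",
		       (unsigned long long)ui, (unsigned long long)(ui * 8));
		return 0;	/* prints: UI = 1000 ps, 8*UI = 8000 ps */
	}
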
+
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask)
{
@@ -268,6 +391,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{ .compatible = "qcom,dsi-phy-28nm-8960",
.data = &dsi_phy_28nm_8960_cfgs },
#endif
+#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
+ { .compatible = "qcom,dsi-phy-14nm",
+ .data = &dsi_phy_14nm_cfgs },
+#endif
{}
};
@@ -295,6 +422,24 @@ static int dsi_phy_get_id(struct msm_dsi_phy *phy)
return -EINVAL;
}
+int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ int ret = 0;
+
+ phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
+ "DSI_PHY_REG");
+ if (IS_ERR(phy->reg_base)) {
+ dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+ __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+fail:
+ return ret;
+}
+
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
@@ -331,15 +476,6 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
}
- phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
- "DSI_PHY_REG");
- if (IS_ERR(phy->reg_base)) {
- dev_err(dev, "%s: failed to map phy regulator base\n",
- __func__);
- ret = -ENOMEM;
- goto fail;
- }
-
ret = dsi_phy_regulator_init(phy);
if (ret) {
dev_err(dev, "%s: failed to init regulator\n", __func__);
@@ -353,6 +489,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
}
+ if (phy->cfg->ops.init) {
+ ret = phy->cfg->ops.init(phy);
+ if (ret)
+ goto fail;
+ }
+
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
@@ -410,7 +552,7 @@ void __exit msm_dsi_phy_driver_unregister(void)
}
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
- const unsigned long bit_rate, const unsigned long esc_rate)
+ struct msm_dsi_phy_clk_request *clk_req)
{
struct device *dev = &phy->pdev->dev;
int ret;
@@ -418,21 +560,52 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
if (!phy || !phy->cfg->ops.enable)
return -EINVAL;
+ ret = dsi_phy_enable_resource(phy);
+ if (ret) {
+ dev_err(dev, "%s: resource enable failed, %d\n",
+ __func__, ret);
+ goto res_en_fail;
+ }
+
ret = dsi_phy_regulator_enable(phy);
if (ret) {
dev_err(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
- return ret;
+ goto reg_en_fail;
}
- ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
+ ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
if (ret) {
dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
- dsi_phy_regulator_disable(phy);
- return ret;
+ goto phy_en_fail;
+ }
+
+ /*
+ * Resetting the DSI PHY silently changes its PLL registers to reset status,
+ * which will confuse the clock driver and result in a wrong output rate for
+ * the link clocks. Restore the PLL state if its PLL is being used as a
+ * clock source.
+ */
+ if (phy->usecase != MSM_DSI_PHY_SLAVE) {
+ ret = msm_dsi_pll_restore_state(phy->pll);
+ if (ret) {
+ dev_err(dev, "%s: failed to restore pll state, %d\n",
+ __func__, ret);
+ goto pll_restor_fail;
+ }
}
return 0;
+
+pll_restor_fail:
+ if (phy->cfg->ops.disable)
+ phy->cfg->ops.disable(phy);
+phy_en_fail:
+ dsi_phy_regulator_disable(phy);
+reg_en_fail:
+ dsi_phy_disable_resource(phy);
+res_en_fail:
+ return ret;
}
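
The reworked enable path follows the kernel's goto-unwind idiom: each failure label undoes exactly the steps that succeeded before it, in reverse order. The generic shape, sketched with stub steps:

	#include <stdio.h>

	static int step_a(void) { puts("a on"); return 0; }
	static int step_b(void) { puts("b on"); return 0; }
	static void undo_a(void) { puts("a off"); }

	static int enable(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			goto a_fail;

		ret = step_b();
		if (ret)
			goto b_fail;

		return 0;	/* success: nothing is unwound */

	b_fail:
		undo_a();	/* undo only what was already enabled */
	a_fail:
		return ret;
	}

	int main(void)
	{
		return enable();
	}
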
void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
@@ -440,21 +613,21 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
if (!phy || !phy->cfg->ops.disable)
return;
+ /* Save PLL status if it is a clock source */
+ if (phy->usecase != MSM_DSI_PHY_SLAVE)
+ msm_dsi_pll_save_state(phy->pll);
+
phy->cfg->ops.disable(phy);
dsi_phy_regulator_disable(phy);
+ dsi_phy_disable_resource(phy);
}
-void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
- u32 *clk_pre, u32 *clk_post)
+void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_shared_timings *shared_timings)
{
- if (!phy)
- return;
-
- if (clk_pre)
- *clk_pre = phy->timing.clk_pre;
- if (clk_post)
- *clk_post = phy->timing.clk_post;
+ memcpy(shared_timings, &phy->timing.shared_timings,
+ sizeof(*shared_timings));
}
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
@@ -465,3 +638,9 @@ struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
return phy->pll;
}
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+ enum msm_dsi_phy_usecase uc)
+{
+ if (phy)
+ phy->usecase = uc;
+}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index f24a85439b94..1733f6608a09 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -22,8 +22,9 @@
#define dsi_phy_write(offset, data) msm_writel((data), (offset))
struct msm_dsi_phy_ops {
+ int (*init) (struct msm_dsi_phy *phy);
int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
- const unsigned long bit_rate, const unsigned long esc_rate);
+ struct msm_dsi_phy_clk_request *clk_req);
void (*disable)(struct msm_dsi_phy *phy);
};
@@ -46,6 +47,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;
@@ -61,12 +63,22 @@ struct msm_dsi_dphy_timing {
u32 ta_go;
u32 ta_sure;
u32 ta_get;
+
+ struct msm_dsi_phy_shared_timings shared_timings;
+
+ /* For PHY v2 only */
+ u32 hs_rqst_ckln;
+ u32 hs_prep_dly;
+ u32 hs_prep_dly_ckln;
+ u8 hs_halfbyte_en;
+ u8 hs_halfbyte_en_ckln;
};
struct msm_dsi_phy {
struct platform_device *pdev;
void __iomem *base;
void __iomem *reg_base;
+ void __iomem *lane_base;
int id;
struct clk *ahb_clk;
@@ -75,6 +87,7 @@ struct msm_dsi_phy {
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
+ enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
struct msm_dsi_pll *pll;
@@ -84,9 +97,12 @@ struct msm_dsi_phy {
* PHY internal functions
*/
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
- const unsigned long bit_rate, const unsigned long esc_rate);
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask);
+int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
#endif /* __DSI_PHY_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
new file mode 100644
index 000000000000..513f4234adc1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+#define PHY_14NM_CKLN_IDX 4
+
+static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing,
+ int lane_idx)
+{
+ void __iomem *base = phy->lane_base;
+ bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX);
+ u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero;
+ u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare;
+ u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail;
+ u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
+ u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly;
+ u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
+ timing->hs_halfbyte_en;
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
+ DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
+ halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
+}
+
+static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ u32 data;
+ int i;
+ int ret;
+ void __iomem *base = phy->base;
+ void __iomem *lane_base = phy->lane_base;
+
+ if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ data = 0x1c;
+ if (phy->usecase != MSM_DSI_PHY_STANDALONE)
+ data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);
+
+ /* 4 data lanes + 1 clk lane configuration */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
+ 0x1d);
+
+ dsi_phy_write(lane_base +
+ REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
+ dsi_phy_write(lane_base +
+ REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
+ (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);
+
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
+ (i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
+ 0);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
+ 0x88);
+
+ dsi_14nm_dphy_set_timing(phy, timing, i);
+ }
+
+ /* Make sure the PLL is not started */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ wmb(); /* make sure everything is written before reset and enable */
+
+ /* reset digital block */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
+ wmb(); /* ensure reset is asserted */
+ udelay(100);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
+
+ msm_dsi_phy_set_src_pll(phy, src_pll_id,
+ REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL,
+ DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+ ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+ if (ret) {
+ dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Remove power down from PLL and all lanes */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);
+
+ return 0;
+}
+
+static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
+ dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
+
+ /* ensure that the phy is completely disabled */
+ wmb();
+}
+
+static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+
+ phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+ "DSI_PHY_LANE");
+ if (IS_ERR(phy->lane_base)) {
+ dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
+ .type = MSM_DSI_PHY_14NM,
+ .src_pll_truthtable = { {false, false}, {true, false} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vcca", 17000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .init = dsi_14nm_phy_init,
+ },
+ .io_start = { 0x994400, 0x996400 },
+ .num_dsi_phy = 2,
+};
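
The cfg table is how the generic dsi_phy layer dispatches to per-generation code; .init is optional and only called when set (see the probe hunk above). A minimal sketch of that dispatch pattern:

	#include <stdio.h>

	struct phy;

	struct phy_ops {
		int  (*init)(struct phy *p);	/* optional, may be NULL */
		void (*disable)(struct phy *p);
	};

	struct phy {
		const struct phy_ops *ops;
	};

	static int my_init(struct phy *p) { (void)p; puts("init"); return 0; }
	static void my_disable(struct phy *p) { (void)p; puts("disable"); }

	static const struct phy_ops my_ops = {
		.init = my_init,
		.disable = my_disable,
	};

	int main(void)
	{
		struct phy p = { .ops = &my_ops };

		if (p.ops->init)	/* only call .init when one exists */
			p.ops->init(&p);
		p.ops->disable(&p);
		return 0;
	}
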
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index c757e2070cac..1ca6c69516f5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -72,7 +72,7 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
}
static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
- const unsigned long bit_rate, const unsigned long esc_rate)
+ struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
int i;
@@ -81,7 +81,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
- if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
@@ -145,6 +145,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
+ .init = msm_dsi_phy_init_common,
},
.io_start = { 0xfd998300, 0xfd9a0300 },
.num_dsi_phy = 2,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 63d7fba31380..4972b52cbe44 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -67,7 +67,7 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
- const unsigned long bit_rate, const unsigned long esc_rate)
+ struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
int i;
@@ -75,7 +75,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
- if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
@@ -144,6 +144,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
+ .init = msm_dsi_phy_init_common,
},
.io_start = { 0xfd922b00, 0xfd923100 },
.num_dsi_phy = 2,
@@ -161,6 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
+ .init = msm_dsi_phy_init_common,
},
.io_start = { 0x1a98500 },
.num_dsi_phy = 1,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 7bdb9de54968..398004463498 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -124,14 +124,14 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
- const unsigned long bit_rate, const unsigned long esc_rate)
+ struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base;
DBG("");
- if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
@@ -191,6 +191,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
+ .init = msm_dsi_phy_init_common,
},
.io_start = { 0x4700300, 0x5800300 },
.num_dsi_phy = 2,
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 5cd438f91afe..bc289f5c9078 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -140,6 +140,15 @@ int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
return 0;
}
+int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc)
+{
+ if (pll->set_usecase)
+ return pll->set_usecase(pll, uc);
+
+ return 0;
+}
+
struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id)
{
@@ -154,6 +163,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_28NM_8960:
pll = msm_dsi_pll_28nm_8960_init(pdev, id);
break;
+ case MSM_DSI_PHY_14NM:
+ pll = msm_dsi_pll_14nm_init(pdev, id);
+ break;
default:
pll = ERR_PTR(-ENXIO);
break;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 2cf1664723e8..f63e7ada74a8 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -41,6 +41,8 @@ struct msm_dsi_pll {
void (*destroy)(struct msm_dsi_pll *pll);
void (*save_state)(struct msm_dsi_pll *pll);
int (*restore_state)(struct msm_dsi_pll *pll);
+ int (*set_usecase)(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc);
};
#define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw)
@@ -104,5 +106,14 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(
}
#endif
+#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
new file mode 100644
index 000000000000..fe15aa64086f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 14nm - clock diagram (eg: DSI0):
+ *
+ * dsi0n1_postdiv_clk
+ * |
+ * |
+ * +----+ | +----+
+ * dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
+ * +----+ | +----+
+ * | dsi0n1_postdivby2_clk
+ * | +----+ |
+ * o---| /2 |--o--|\
+ * | +----+ | \ +----+
+ * | | |--| n2 |-- dsi0pll
+ * o--------------| / +----+
+ * |/
+ */
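
Reading the diagram: dsi0pllbyte = VCO / N1 / 8, and with dsiclk_sel picking the /2 path, dsi0pll = VCO / N1 / 2 / N2. A quick sanity check with made-up divider values (not taken from real hardware):

	#include <stdio.h>

	int main(void)
	{
		unsigned long vco = 1800000000UL;  /* example VCO rate */
		unsigned int n1 = 2, n2 = 5;       /* hypothetical post-dividers */

		unsigned long byte_clk  = vco / n1 / 8;       /* dsi0pllbyte */
		unsigned long pixel_clk = vco / n1 / 2 / n2;  /* dsi0pll, /2 path */

		printf("byte=%lu pixel=%lu\n", byte_clk, pixel_clk);
		return 0;	/* byte=112500000 pixel=90000000 */
	}
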
+
+#define POLL_MAX_READS 15
+#define POLL_TIMEOUT_US 1000
+
+#define NUM_PROVIDED_CLKS 2
+
+#define VCO_REF_CLK_RATE 19200000
+#define VCO_MIN_RATE 1300000000UL
+#define VCO_MAX_RATE 2600000000UL
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+
+#define DSI_PLL_DEFAULT_VCO_POSTDIV 1
+
+struct dsi_pll_input {
+ u32 fref; /* reference clk */
+ u32 fdata; /* bit clock rate */
+ u32 dsiclk_sel; /* Mux configuration (see diagram) */
+ u32 ssc_en; /* SSC enable/disable */
+ u32 ldo_en;
+
+ /* fixed params */
+ u32 refclk_dbler_en;
+ u32 vco_measure_time;
+ u32 kvco_measure_time;
+ u32 bandgap_timer;
+ u32 pll_wakeup_timer;
+ u32 plllock_cnt;
+ u32 plllock_rng;
+ u32 ssc_center;
+ u32 ssc_adj_period;
+ u32 ssc_spread;
+ u32 ssc_freq;
+ u32 pll_ie_trim;
+ u32 pll_ip_trim;
+ u32 pll_iptat_trim;
+ u32 pll_cpcset_cur;
+ u32 pll_cpmset_cur;
+
+ u32 pll_icpmset;
+ u32 pll_icpcset;
+
+ u32 pll_icpmset_p;
+ u32 pll_icpmset_m;
+
+ u32 pll_icpcset_p;
+ u32 pll_icpcset_m;
+
+ u32 pll_lpf_res1;
+ u32 pll_lpf_cap1;
+ u32 pll_lpf_cap2;
+ u32 pll_c3ctrl;
+ u32 pll_r3ctrl;
+};
+
+struct dsi_pll_output {
+ u32 pll_txclk_en;
+ u32 dec_start;
+ u32 div_frac_start;
+ u32 ssc_period;
+ u32 ssc_step_size;
+ u32 plllock_cmp;
+ u32 pll_vco_div_ref;
+ u32 pll_vco_count;
+ u32 pll_kvco_div_ref;
+ u32 pll_kvco_count;
+ u32 pll_misc1;
+ u32 pll_lpf2_postdiv;
+ u32 pll_resetsm_cntrl;
+ u32 pll_resetsm_cntrl2;
+ u32 pll_resetsm_cntrl5;
+ u32 pll_kvco_code;
+
+ u32 cmn_clk_cfg0;
+ u32 cmn_clk_cfg1;
+ u32 cmn_ldo_cntrl;
+
+ u32 pll_postdiv;
+ u32 fcvo;
+};
+
+struct pll_14nm_cached_state {
+ unsigned long vco_rate;
+ u8 n2postdiv;
+ u8 n1postdiv;
+};
+
+struct dsi_pll_14nm {
+ struct msm_dsi_pll base;
+
+ int id;
+ struct platform_device *pdev;
+
+ void __iomem *phy_cmn_mmio;
+ void __iomem *mmio;
+
+ int vco_delay;
+
+ struct dsi_pll_input in;
+ struct dsi_pll_output out;
+
+ /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ u64 vco_current_rate;
+ u64 vco_ref_clk_rate;
+
+ /* private clocks: */
+ struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
+ u32 num_hws;
+
+ /* clock-provider: */
+ struct clk_hw_onecell_data *hw_data;
+
+ struct pll_14nm_cached_state cached_state;
+
+ enum msm_dsi_phy_usecase uc;
+ struct dsi_pll_14nm *slave;
+};
+
+#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, base)
+
+/*
+ * Private struct for N1/N2 post-divider clocks. These clocks are similar to
+ * the generic clk_divider class of clocks. The only difference is that these
+ * also set the slave DSI PLL's post-dividers when in Dual DSI mode
+ */
+struct dsi_pll_14nm_postdiv {
+ struct clk_hw hw;
+
+ /* divider params */
+ u8 shift;
+ u8 width;
+ u8 flags; /* same flags as used by clk_divider struct */
+
+ struct dsi_pll_14nm *pll;
+};
+
+#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
+
+static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
+ u32 nb_tries, u32 timeout_us)
+{
+ bool pll_locked = false;
+ void __iomem *base = pll_14nm->mmio;
+ u32 tries, val;
+
+ tries = nb_tries;
+ while (tries--) {
+ val = pll_read(base +
+ REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ pll_locked = !!(val & BIT(5));
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+
+ if (!pll_locked) {
+ tries = nb_tries;
+ while (tries--) {
+ val = pll_read(base +
+ REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ pll_locked = !!(val & BIT(0));
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+ }
+
+ DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
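
The lock check above is the usual bounded-poll pattern: read a status register up to nb_tries times, delaying between reads. A generic standalone version with stubbed register access (the real driver uses pll_read() and udelay()):

	#include <stdbool.h>
	#include <stdint.h>

	/* Stubs standing in for MMIO and delay; pretend bit 5 is set. */
	static uint32_t read_status(void) { return 1u << 5; }
	static void delay_us(unsigned int us) { (void)us; }

	static bool poll_for_bit(uint32_t mask, unsigned int tries,
				 unsigned int step_us)
	{
		while (tries--) {
			if (read_status() & mask)
				return true;
			delay_us(step_us);
		}
		return false;
	}

	int main(void)
	{
		return poll_for_bit(1u << 5, 15, 1000) ? 0 : 1;
	}
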
+
+static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll)
+{
+ pll->in.fref = pll->vco_ref_clk_rate;
+ pll->in.fdata = 0;
+ pll->in.dsiclk_sel = 1; /* Use the /2 path in Mux */
+ pll->in.ldo_en = 0; /* disabled for now */
+
+ /* fixed input */
+ pll->in.refclk_dbler_en = 0;
+ pll->in.vco_measure_time = 5;
+ pll->in.kvco_measure_time = 5;
+ pll->in.bandgap_timer = 4;
+ pll->in.pll_wakeup_timer = 5;
+ pll->in.plllock_cnt = 1;
+ pll->in.plllock_rng = 0;
+
+ /*
+ * SSC is enabled by default. We might need DT props for configuring
+ * some SSC params like PPM and center/down spread etc.
+ */
+ pll->in.ssc_en = 1;
+ pll->in.ssc_center = 0; /* down spread by default */
+ pll->in.ssc_spread = 5; /* PPM / 1000 */
+ pll->in.ssc_freq = 31500; /* default recommended */
+ pll->in.ssc_adj_period = 37;
+
+ pll->in.pll_ie_trim = 4;
+ pll->in.pll_ip_trim = 4;
+ pll->in.pll_cpcset_cur = 1;
+ pll->in.pll_cpmset_cur = 1;
+ pll->in.pll_icpmset = 4;
+ pll->in.pll_icpcset = 4;
+ pll->in.pll_icpmset_p = 0;
+ pll->in.pll_icpmset_m = 0;
+ pll->in.pll_icpcset_p = 0;
+ pll->in.pll_icpcset_m = 0;
+ pll->in.pll_lpf_res1 = 3;
+ pll->in.pll_lpf_cap1 = 11;
+ pll->in.pll_lpf_cap2 = 1;
+ pll->in.pll_iptat_trim = 7;
+ pll->in.pll_c3ctrl = 2;
+ pll->in.pll_r3ctrl = 1;
+}
+
+#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
+
+static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll)
+{
+ u32 period, ssc_period;
+ u32 ref, rem;
+ u64 step_size;
+
+ DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+ ssc_period = pll->in.ssc_freq / 500;
+ period = (u32)pll->vco_ref_clk_rate / 1000;
+ ssc_period = CEIL(period, ssc_period);
+ ssc_period -= 1;
+ pll->out.ssc_period = ssc_period;
+
+ DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq,
+ pll->in.ssc_spread, pll->out.ssc_period);
+
+ step_size = (u32)pll->vco_current_rate;
+ ref = pll->vco_ref_clk_rate;
+ ref /= 1000;
+ step_size = div_u64(step_size, ref);
+ step_size <<= 20;
+ step_size = div_u64(step_size, 1000);
+ step_size *= pll->in.ssc_spread;
+ step_size = div_u64(step_size, 1000);
+ step_size *= (pll->in.ssc_adj_period + 1);
+
+ rem = 0;
+ step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
+ if (rem)
+ step_size++;
+
+ DBG("step_size=%lld", step_size);
+
+ step_size &= 0x0ffff; /* take lower 16 bits */
+
+ pll->out.ssc_step_size = step_size;
+}
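
With the defaults set in dsi_pll_14nm_input_init() (ssc_freq = 31500, 19.2 MHz reference), the period computation above reduces to small integer arithmetic; a sketch reproducing it:

	#include <stdio.h>

	#define CEIL(x, y) (((x) + ((y) - 1)) / (y))

	int main(void)
	{
		unsigned int ssc_freq = 31500;		/* Hz, default above */
		unsigned int ref = 19200000;		/* 19.2 MHz reference */
		unsigned int ssc_period = ssc_freq / 500;	/* 63 */
		unsigned int period = ref / 1000;		/* 19200 */

		ssc_period = CEIL(period, ssc_period) - 1;
		printf("ssc_period = %u\n", ssc_period);	/* prints 304 */
		return 0;
	}
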
+
+static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll)
+{
+ struct dsi_pll_input *pin = &pll->in;
+ struct dsi_pll_output *pout = &pll->out;
+ u64 multiplier = BIT(20);
+ u64 dec_start_multiple, dec_start, pll_comp_val;
+ u32 duration, div_frac_start;
+ u64 vco_clk_rate = pll->vco_current_rate;
+ u64 fref = pll->vco_ref_clk_rate;
+
+ DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
+
+ dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
+ div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+ dec_start = div_u64(dec_start_multiple, multiplier);
+
+ pout->dec_start = (u32)dec_start;
+ pout->div_frac_start = div_frac_start;
+
+ if (pin->plllock_cnt == 0)
+ duration = 1024;
+ else if (pin->plllock_cnt == 1)
+ duration = 256;
+ else if (pin->plllock_cnt == 2)
+ duration = 128;
+ else
+ duration = 32;
+
+ pll_comp_val = duration * dec_start_multiple;
+ pll_comp_val = div_u64(pll_comp_val, multiplier);
+ do_div(pll_comp_val, 10);
+
+ pout->plllock_cmp = (u32)pll_comp_val;
+
+ pout->pll_txclk_en = 1;
+ pout->cmn_ldo_cntrl = 0x3c;
+}
+
+static u32 pll_14nm_kvco_slop(u32 vrate)
+{
+ u32 slop = 0;
+
+ if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
+ slop = 600;
+ else if (vrate > 1800000000UL && vrate < 2300000000UL)
+ slop = 400;
+ else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
+ slop = 280;
+
+ return slop;
+}
+
+static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll)
+{
+ struct dsi_pll_input *pin = &pll->in;
+ struct dsi_pll_output *pout = &pll->out;
+ u64 vco_clk_rate = pll->vco_current_rate;
+ u64 fref = pll->vco_ref_clk_rate;
+ u64 data;
+ u32 cnt;
+
+ data = fref * pin->vco_measure_time;
+ do_div(data, 1000000);
+ data &= 0x03ff; /* 10 bits */
+ data -= 2;
+ pout->pll_vco_div_ref = data;
+
+ data = div_u64(vco_clk_rate, 1000000); /* unit is MHz */
+ data *= pin->vco_measure_time;
+ do_div(data, 10);
+ pout->pll_vco_count = data;
+
+ data = fref * pin->kvco_measure_time;
+ do_div(data, 1000000);
+ data &= 0x03ff; /* 10 bits */
+ data -= 1;
+ pout->pll_kvco_div_ref = data;
+
+ cnt = pll_14nm_kvco_slop(vco_clk_rate);
+ cnt *= 2;
+ cnt /= 100;
+ cnt *= pin->kvco_measure_time;
+ pout->pll_kvco_count = cnt;
+
+ pout->pll_misc1 = 16;
+ pout->pll_resetsm_cntrl = 48;
+ pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+ pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+ pout->pll_kvco_code = 0;
+}
+
+static void pll_db_commit_ssc(struct dsi_pll_14nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_input *pin = &pll->in;
+ struct dsi_pll_output *pout = &pll->out;
+ u8 data;
+
+ data = pin->ssc_adj_period;
+ data &= 0x0ff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
+ data = (pin->ssc_adj_period >> 8);
+ data &= 0x03;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
+
+ data = pout->ssc_period;
+ data &= 0x0ff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
+ data = (pout->ssc_period >> 8);
+ data &= 0x0ff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
+
+ data = pout->ssc_step_size;
+ data &= 0x0ff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
+ data = (pout->ssc_step_size >> 8);
+ data &= 0x0ff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
+
+ data = (pin->ssc_center & 0x01);
+ data <<= 1;
+ data |= 0x01; /* enable */
+ pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
+
+ wmb(); /* make sure register committed */
+}
+
+static void pll_db_commit_common(struct dsi_pll_14nm *pll,
+ struct dsi_pll_input *pin,
+ struct dsi_pll_output *pout)
+{
+ void __iomem *base = pll->mmio;
+ u8 data;
+
+ /* configure the non-frequency-dependent pll registers */
+ data = 0;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
+
+ data = pout->pll_txclk_en;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data);
+
+ data = pout->pll_resetsm_cntrl;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data);
+ data = pout->pll_resetsm_cntrl2;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data);
+ data = pout->pll_resetsm_cntrl5;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data);
+
+ data = pout->pll_vco_div_ref & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
+ data = (pout->pll_vco_div_ref >> 8) & 0x3;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
+
+ data = pout->pll_kvco_div_ref & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
+ data = (pout->pll_kvco_div_ref >> 8) & 0x3;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
+
+ data = pout->pll_misc1;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data);
+
+ data = pin->pll_ie_trim;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data);
+
+ data = pin->pll_ip_trim;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data);
+
+ data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data);
+
+ data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data);
+
+ data = pin->pll_icpmset_p << 3 | pin->pll_icpcset_m;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data);
+
+ data = pin->pll_icpmset << 3 | pin->pll_icpcset;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data);
+
+ data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data);
+
+ data = pin->pll_iptat_trim;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data);
+
+ data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data);
+}
+
+static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
+{
+ void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+
+ /* de-assert pll start and apply pll sw reset */
+
+ /* stop pll */
+ pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+ /* pll sw reset */
+ pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
+ wmb(); /* make sure register committed */
+
+ pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
+ wmb(); /* make sure register committed */
+}
+
+static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
+ struct dsi_pll_input *pin,
+ struct dsi_pll_output *pout)
+{
+ void __iomem *base = pll->mmio;
+ void __iomem *cmn_base = pll->phy_cmn_mmio;
+ u8 data;
+
+ DBG("DSI%d PLL", pll->id);
+
+ data = pout->cmn_ldo_cntrl;
+ pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+ pll_db_commit_common(pll, pin, pout);
+
+ pll_14nm_software_reset(pll);
+
+ data = pin->dsiclk_sel; /* set dsiclk_sel = 1 */
+ pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data);
+
+ data = 0xff; /* data, clk, pll normal operation */
+ pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
+
+ /* configure the frequency dependent pll registers */
+ data = pout->dec_start;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
+
+ data = pout->div_frac_start & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
+ data = (pout->div_frac_start >> 8) & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
+ data = (pout->div_frac_start >> 16) & 0xf;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
+
+ data = pout->plllock_cmp & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
+
+ data = (pout->plllock_cmp >> 8) & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
+
+ data = (pout->plllock_cmp >> 16) & 0x3;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
+
+ data = pin->plllock_cnt << 1 | pin->plllock_rng << 3;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
+
+ data = pout->pll_vco_count & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
+ data = (pout->pll_vco_count >> 8) & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
+
+ data = pout->pll_kvco_count & 0xff;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
+ data = (pout->pll_kvco_count >> 8) & 0x3;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
+
+ data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1;
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data);
+
+ if (pin->ssc_en)
+ pll_db_commit_ssc(pll);
+
+ wmb(); /* make sure register committed */
+}
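
div_frac_start is a 20-bit quantity programmed 8+8+4 bits across the three DIV_FRAC_START registers above. The split, as plain arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t div_frac_start = 786432;	/* example, 0xc0000 */

		unsigned int r1 = div_frac_start & 0xff;	/* START1 */
		unsigned int r2 = (div_frac_start >> 8) & 0xff;	/* START2 */
		unsigned int r3 = (div_frac_start >> 16) & 0xf;	/* START3 */

		printf("%02x %02x %x\n", r1, r2, r3);	/* prints 00 00 c */
		return 0;
	}
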
+
+/*
+ * VCO clock Callbacks
+ */
+static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ struct dsi_pll_input *pin = &pll_14nm->in;
+ struct dsi_pll_output *pout = &pll_14nm->out;
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate,
+ parent_rate);
+
+ pll_14nm->vco_current_rate = rate;
+ pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+ dsi_pll_14nm_input_init(pll_14nm);
+
+ /*
+ * This configures the post divider internal to the VCO. It's
+ * fixed to divide by 1 for now.
+ *
+ * tx_band = pll_postdiv.
+ * 0: divided by 1
+ * 1: divided by 2
+ * 2: divided by 4
+ * 3: divided by 8
+ */
+ pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV;
+
+ pll_14nm_dec_frac_calc(pll_14nm);
+
+ if (pin->ssc_en)
+ pll_14nm_ssc_calc(pll_14nm);
+
+ pll_14nm_calc_vco_count(pll_14nm);
+
+ /* Commit the slave DSI PLL registers if we're the master. Note that we
+ * don't lock the slave PLL. We just ensure that the PLL/PHY registers
+ * of the master and slave are identical.
+ */
+ if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+
+ pll_db_commit_14nm(pll_14nm_slave, pin, pout);
+ }
+
+ pll_db_commit_14nm(pll_14nm, pin, pout);
+
+ return 0;
+}
+
+static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ void __iomem *base = pll_14nm->mmio;
+ u64 vco_rate, multiplier = BIT(20);
+ u32 div_frac_start;
+ u32 dec_start;
+ u64 ref_clk = parent_rate;
+
+ dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
+ dec_start &= 0x0ff;
+
+ DBG("dec_start = %x", dec_start);
+
+ div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
+ & 0xf) << 16;
+ div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
+ & 0xff) << 8;
+ div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
+ & 0xff;
+
+ DBG("div_frac_start = %x", div_frac_start);
+
+ vco_rate = ref_clk * dec_start;
+
+ vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+ /*
+ * Recalculating the rate from dec_start and div_frac_start doesn't yield
+ * exactly the rate we originally set. Convert the freq to kHz, round it up
+ * and convert it back to Hz.
+ */
+ vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
+
+ DBG("returning vco rate = %lu", (unsigned long)vco_rate);
+
+ return (unsigned long)vco_rate;
+}
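
The recalc inverts the fractional-N programming: vco = ref * (dec_start + div_frac_start / 2^20). A worked example with plausible values for a 1.8 GHz VCO (1.8e9 / 19.2e6 = 93.75, so the fraction is 0.75 * 2^20):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ref = 19200000;	/* 19.2 MHz reference */
		uint32_t dec_start = 93;	/* integer part of 93.75 */
		uint32_t frac = 786432;		/* 0.75 * 2^20 */

		uint64_t vco = ref * dec_start + ((ref * frac) >> 20);

		printf("vco = %llu\n", (unsigned long long)vco);
		return 0;	/* prints vco = 1800000000 */
	}
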
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
+ .round_rate = msm_dsi_pll_helper_clk_round_rate,
+ .set_rate = dsi_pll_14nm_vco_set_rate,
+ .recalc_rate = dsi_pll_14nm_vco_recalc_rate,
+ .prepare = msm_dsi_pll_helper_clk_prepare,
+ .unprepare = msm_dsi_pll_helper_clk_unprepare,
+};
+
+/*
+ * N1 and N2 post-divider clock callbacks
+ */
+#define div_mask(width) ((1 << (width)) - 1)
+static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+ void __iomem *base = pll_14nm->phy_cmn_mmio;
+ u8 shift = postdiv->shift;
+ u8 width = postdiv->width;
+ u32 val;
+
+ DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate);
+
+ val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
+ val &= div_mask(width);
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL,
+ postdiv->flags);
+}
+
+static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+
+ DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate);
+
+ return divider_round_rate(hw, rate, prate, NULL,
+ postdiv->width,
+ postdiv->flags);
+}
+
+static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+ void __iomem *base = pll_14nm->phy_cmn_mmio;
+ spinlock_t *lock = &pll_14nm->postdiv_lock;
+ u8 shift = postdiv->shift;
+ u8 width = postdiv->width;
+ unsigned int value;
+ unsigned long flags = 0;
+ u32 val;
+
+ DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate,
+ parent_rate);
+
+ value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
+ postdiv->flags);
+
+ spin_lock_irqsave(lock, flags);
+
+ val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+ val &= ~(div_mask(width) << shift);
+
+ val |= value << shift;
+ pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+
+ /* If we're the master in dual DSI mode, then the slave PLL's post-dividers
+ * follow the master's post-dividers.
+ */
+ if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+ void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
+
+ pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+ }
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
+ .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
+ .round_rate = dsi_pll_14nm_postdiv_round_rate,
+ .set_rate = dsi_pll_14nm_postdiv_set_rate,
+};
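
set_rate above is a read-modify-write of one 4-bit field in CMN_CLK_CFG0 (N1 in bits 0-3, N2 in bits 4-7). The field update, reduced to plain integers (no MMIO or locking):

	#include <stdint.h>
	#include <stdio.h>

	#define div_mask(width) ((1u << (width)) - 1)

	/* Clear a `width`-bit field at `shift`, then write `value` into it. */
	static uint32_t set_field(uint32_t reg, unsigned int shift,
				  unsigned int width, uint32_t value)
	{
		reg &= ~(div_mask(width) << shift);
		reg |= (value & div_mask(width)) << shift;
		return reg;
	}

	int main(void)
	{
		uint32_t reg = 0;	/* stand-in for CMN_CLK_CFG0 */

		reg = set_field(reg, 0, 4, 2);	/* N1 = 2, bits 0-3 */
		reg = set_field(reg, 4, 4, 5);	/* N2 = 5, bits 4-7 */
		printf("reg = 0x%02x\n", (unsigned int)reg); /* reg = 0x52 */
		return 0;
	}
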
+
+/*
+ * PLL Callbacks
+ */
+
+static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ void __iomem *base = pll_14nm->mmio;
+ void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+ bool locked;
+
+ DBG("");
+
+ pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+ pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+
+ locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+
+ if (unlikely(!locked))
+ dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+ else
+ DBG("DSI PLL lock success");
+
+ return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+
+ DBG("");
+
+ pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+}
+
+static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+ void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+ u32 data;
+
+ data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+
+ cached_state->n1postdiv = data & 0xf;
+ cached_state->n2postdiv = (data >> 4) & 0xf;
+
+ DBG("DSI%d PLL save state %x %x", pll_14nm->id,
+ cached_state->n1postdiv, cached_state->n2postdiv);
+
+ cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+}
+
+static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+ void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
+ u32 data;
+ int ret;
+
+ ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ dev_err(&pll_14nm->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
+
+ DBG("DSI%d PLL restore state %x %x", pll_14nm->id,
+ cached_state->n1postdiv, cached_state->n2postdiv);
+
+ pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+
+ /* also restore post-dividers for slave DSI PLL */
+ if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+ void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
+
+ pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+ }
+
+ return 0;
+}
+
+static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ void __iomem *base = pll_14nm->mmio;
+ u32 clkbuflr_en, bandgap = 0;
+
+ switch (uc) {
+ case MSM_DSI_PHY_STANDALONE:
+ clkbuflr_en = 0x1;
+ break;
+ case MSM_DSI_PHY_MASTER:
+ clkbuflr_en = 0x3;
+ pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ clkbuflr_en = 0x0;
+ bandgap = 0x3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
+ if (bandgap)
+ pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
+
+ pll_14nm->uc = uc;
+
+ return 0;
+}
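
The usecase decides who drives the clock buffers: a standalone PHY feeds only itself, the dual-DSI master feeds both sides, and the slave feeds nothing (its clocks come from the master). A reduced sketch of the selection above:

	#include <stdio.h>

	enum usecase { STANDALONE, MASTER, SLAVE };

	static int clkbuflr_en_for(enum usecase uc)
	{
		switch (uc) {
		case STANDALONE:
			return 0x1;	/* drive only this PHY's clock path */
		case MASTER:
			return 0x3;	/* drive both left and right buffers */
		case SLAVE:
			return 0x0;	/* clocks sourced from the master PLL */
		}
		return -1;
	}

	int main(void)
	{
		printf("%#x\n", clkbuflr_en_for(MASTER));	/* prints 0x3 */
		return 0;
	}
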
+
+static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider,
+ struct clk **pixel_clk_provider)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data;
+
+ if (byte_clk_provider)
+ *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+ if (pixel_clk_provider)
+ *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+ return 0;
+}
+
+static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
+ struct platform_device *pdev = pll_14nm->pdev;
+ int num_hws = pll_14nm->num_hws;
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ while (num_hws--)
+ clk_hw_unregister(pll_14nm->hws[num_hws]);
+}
+
+static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ u8 shift)
+{
+ struct dsi_pll_14nm_postdiv *pll_postdiv;
+ struct device *dev = &pll_14nm->pdev->dev;
+ struct clk_init_data postdiv_init = {
+ .parent_names = (const char *[]) { parent_name },
+ .num_parents = 1,
+ .name = name,
+ .flags = flags,
+ .ops = &clk_ops_dsi_pll_14nm_postdiv,
+ };
+ int ret;
+
+ pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
+ if (!pll_postdiv)
+ return ERR_PTR(-ENOMEM);
+
+ pll_postdiv->pll = pll_14nm;
+ pll_postdiv->shift = shift;
+ /* both N1 and N2 postdividers are 4 bits wide */
+ pll_postdiv->width = 4;
+ /* range of each divider is from 1 to 15 */
+ pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
+ pll_postdiv->hw.init = &postdiv_init;
+
+ ret = clk_hw_register(dev, &pll_postdiv->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &pll_postdiv->hw;
+}
+
+static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
+{
+ char clk_name[32], parent[32], vco_name[32];
+ struct clk_init_data vco_init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_14nm_vco,
+ };
+ struct device *dev = &pll_14nm->pdev->dev;
+ struct clk_hw **hws = pll_14nm->hws;
+ struct clk_hw_onecell_data *hw_data;
+ struct clk_hw *hw;
+ int num = 0;
+ int ret;
+
+ DBG("DSI%d", pll_14nm->id);
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+ NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id);
+ pll_14nm->base.clk_hw.init = &vco_init;
+
+ ret = clk_hw_register(dev, &pll_14nm->base.clk_hw);
+ if (ret)
+ return ret;
+
+ hws[num++] = &pll_14nm->base.clk_hw;
+
+ snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+ snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id);
+
+ /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
+ hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
+ CLK_SET_RATE_PARENT, 0);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id);
+ snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+
+ /* DSI Byte clock = VCO_CLK / N1 / 8 */
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+ hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
+ snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
+
+ /*
+ * Skip the mux for now, force DSICLK_SEL to 1, and add a /2 divider
+ * along the way. Don't let it set the parent.
+ */
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id);
+ snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
+
+ /*
+ * DSI pixel clock = VCO_CLK / N1 / 2 / N2.
+ * This is the output of the N2 post-divider, bits 4-7 in
+ * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set the parent.
+ */
+ hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+ hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+ pll_14nm->num_hws = num;
+
+ hw_data->num = NUM_PROVIDED_CLKS;
+ pll_14nm->hw_data = hw_data;
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ pll_14nm->hw_data);
+ if (ret) {
+ dev_err(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
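+
+/*
+ * Rate sketch for the tree registered above; the divider values are
+ * hypothetical, not taken from this patch. With VCO = 1.5 GHz, N1 = 2,
+ * N2 = 5:
+ *
+ *	byte clock  = VCO / N1 / 8      = 1500000000 / 2 / 8     = 93.75 MHz
+ *	pixel clock = VCO / N1 / 2 / N2 = 1500000000 / 2 / 2 / 5 = 75 MHz
+ */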
+
+struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
+{
+ struct dsi_pll_14nm *pll_14nm;
+ struct msm_dsi_pll *pll;
+ int ret;
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
+ if (!pll_14nm)
+ return ERR_PTR(-ENOMEM);
+
+ DBG("PLL%d", id);
+
+ pll_14nm->pdev = pdev;
+ pll_14nm->id = id;
+ pll_14nm_list[id] = pll_14nm;
+
+ pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
+ dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+ if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
+ dev_err(&pdev->dev, "failed to map PLL base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&pll_14nm->postdiv_lock);
+
+ pll = &pll_14nm->base;
+ pll->min_rate = VCO_MIN_RATE;
+ pll->max_rate = VCO_MAX_RATE;
+ pll->get_provider = dsi_pll_14nm_get_provider;
+ pll->destroy = dsi_pll_14nm_destroy;
+ pll->disable_seq = dsi_pll_14nm_disable_seq;
+ pll->save_state = dsi_pll_14nm_save_state;
+ pll->restore_state = dsi_pll_14nm_restore_state;
+ pll->set_usecase = dsi_pll_14nm_set_usecase;
+
+ pll_14nm->vco_delay = 1;
+
+ pll->en_seq_cnt = 1;
+ pll->enable_seqs[0] = dsi_pll_14nm_enable_seq;
+
+ ret = pll_14nm_register(pll_14nm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return pll;
+}
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index 2bc73f82f3f5..931a5c97cccf 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -106,7 +106,7 @@ struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
bridge = &edp_bridge->base;
bridge->funcs = &edp_bridge_funcs;
- ret = drm_bridge_attach(edp->dev, bridge);
+ ret = drm_bridge_attach(edp->encoder, bridge, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index bacbd5d8df0e..4e6d1bf27474 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -227,7 +227,7 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
- ret = drm_bridge_attach(hdmi->dev, bridge);
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index b782efd4b95f..94ea963519b2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -260,8 +260,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
struct drm_encoder *encoder;
struct drm_connector *connector;
struct device_node *panel_node;
- struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
- int i, dsi_id;
+ int dsi_id;
int ret;
switch (intf_type) {
@@ -322,22 +321,19 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
if (!priv->dsi[dsi_id])
break;
- for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
- dsi_encs[i] = mdp4_dsi_encoder_init(dev);
- if (IS_ERR(dsi_encs[i])) {
- ret = PTR_ERR(dsi_encs[i]);
- dev_err(dev->dev,
- "failed to construct DSI encoder: %d\n",
- ret);
- return ret;
- }
-
- /* TODO: Add DMA_S later? */
- dsi_encs[i]->possible_crtcs = 1 << DMA_P;
- priv->encoders[priv->num_encoders++] = dsi_encs[i];
+ encoder = mdp4_dsi_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ dev_err(dev->dev,
+ "failed to construct DSI encoder: %d\n", ret);
+ return ret;
}
- ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
+ /* TODO: Add DMA_S later? */
+ encoder->possible_crtcs = 1 << DMA_P;
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
dev_err(dev->dev, "failed to initialize DSI: %d\n",
ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 911e4690d36a..53619d07677e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -43,7 +43,7 @@ enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb)
if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
is_tile = true;
- if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile)
+ if (fb->format->format == DRM_FORMAT_NV12 && is_tile)
return FRAME_TILE_YCBCR_420;
return FRAME_LINEAR;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 27d5371acee0..e6dfc518d4db 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2016 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-01-11 05:19:19)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
+
+Copyright (C) 2013-2017 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -65,16 +57,19 @@ enum mdp5_intfnum {
};
enum mdp5_pipe {
- SSPP_VIG0 = 0,
- SSPP_VIG1 = 1,
- SSPP_VIG2 = 2,
- SSPP_RGB0 = 3,
- SSPP_RGB1 = 4,
- SSPP_RGB2 = 5,
- SSPP_DMA0 = 6,
- SSPP_DMA1 = 7,
- SSPP_VIG3 = 8,
- SSPP_RGB3 = 9,
+ SSPP_NONE = 0,
+ SSPP_VIG0 = 1,
+ SSPP_VIG1 = 2,
+ SSPP_VIG2 = 3,
+ SSPP_RGB0 = 4,
+ SSPP_RGB1 = 5,
+ SSPP_RGB2 = 6,
+ SSPP_DMA0 = 7,
+ SSPP_DMA1 = 8,
+ SSPP_VIG3 = 9,
+ SSPP_RGB3 = 10,
+ SSPP_CURSOR0 = 11,
+ SSPP_CURSOR1 = 12,
};
enum mdp5_ctl_mode {
@@ -532,6 +527,7 @@ static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id va
static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
{
switch (idx) {
+ case SSPP_NONE: return (INVALID_IDX(idx));
case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
@@ -542,6 +538,8 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
+ case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]);
+ case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]);
default: return INVALID_IDX(idx);
}
}
@@ -1073,6 +1071,10 @@ static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000
#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080
+#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000
static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 618b2ffed9b4..34ab553f6897 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -421,6 +421,16 @@ const struct mdp5_cfg_hw msm8x96_config = {
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
+ .pipe_cursor = {
+ .count = 2,
+ .base = { 0x34000, 0x36000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 050e1618c836..b1c7daaede86 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -32,7 +32,7 @@ extern const struct mdp5_cfg_hw *mdp5_cfg;
typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
#define MDP5_SUB_BLOCK_DEFINITION \
- int count; \
+ unsigned int count; \
uint32_t base[MAX_BASES]
struct mdp5_sub_block {
@@ -85,6 +85,7 @@ struct mdp5_cfg_hw {
struct mdp5_pipe_block pipe_vig;
struct mdp5_pipe_block pipe_rgb;
struct mdp5_pipe_block pipe_dma;
+ struct mdp5_pipe_block pipe_cursor;
struct mdp5_lm_block lm;
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index c627ab6d0061..df1c8adec3f3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -16,16 +16,6 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
-struct mdp5_cmd_encoder {
- struct drm_encoder base;
- struct mdp5_interface intf;
- bool enabled;
- uint32_t bsc;
-
- struct mdp5_ctl *ctl;
-};
-#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
-
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
@@ -36,47 +26,8 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
#include <mach/board.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
-#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
- { \
- .src = MSM_BUS_MASTER_MDP_PORT0, \
- .dst = MSM_BUS_SLAVE_EBI_CH0, \
- .ab = (ab_val), \
- .ib = (ib_val), \
- }
-
-static struct msm_bus_vectors mdp_bus_vectors[] = {
- MDP_BUS_VECTOR_ENTRY(0, 0),
- MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
-};
-static struct msm_bus_paths mdp_bus_usecases[] = { {
- .num_paths = 1,
- .vectors = &mdp_bus_vectors[0],
-}, {
- .num_paths = 1,
- .vectors = &mdp_bus_vectors[1],
-} };
-static struct msm_bus_scale_pdata mdp_bus_scale_table = {
- .usecase = mdp_bus_usecases,
- .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
- .name = "mdss_mdp",
-};
-
-static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
-{
- mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
- &mdp_bus_scale_table);
- DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
-}
-
-static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc)
-{
- if (mdp5_cmd_enc->bsc) {
- msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc);
- mdp5_cmd_enc->bsc = 0;
- }
-}
-static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
+static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx)
{
if (mdp5_cmd_enc->bsc) {
DBG("set bus scaling: %d", idx);
@@ -89,14 +40,12 @@ static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
}
}
#else
-static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
-static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
-static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {}
+static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {}
#endif
#define VSYNC_CLK_RATE 19200000
static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct device *dev = encoder->dev->dev;
@@ -176,23 +125,11 @@ static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
clk_disable_unprepare(mdp5_kms->vsync_clk);
}
-static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
- bs_fini(mdp5_cmd_enc);
- drm_encoder_cleanup(encoder);
- kfree(mdp5_cmd_enc);
-}
-
-static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = {
- .destroy = mdp5_cmd_encoder_destroy,
-};
-
-static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
mode = adjusted_mode;
@@ -209,9 +146,9 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
mdp5_cmd_enc->ctl);
}
-static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{
- struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
@@ -228,9 +165,9 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
mdp5_cmd_enc->enabled = false;
}
-static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{
- struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
@@ -248,16 +185,10 @@ static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
mdp5_cmd_enc->enabled = true;
}
-static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
- .mode_set = mdp5_cmd_encoder_mode_set,
- .disable = mdp5_cmd_encoder_disable,
- .enable = mdp5_cmd_encoder_enable,
-};
-
int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder)
+ struct drm_encoder *slave_encoder)
{
- struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms;
int intf_num;
u32 data = 0;
@@ -292,43 +223,3 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
return 0;
}
-
-/* initialize command mode encoder */
-struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl)
-{
- struct drm_encoder *encoder = NULL;
- struct mdp5_cmd_encoder *mdp5_cmd_enc;
- int ret;
-
- if (WARN_ON((intf->type != INTF_DSI) &&
- (intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) {
- ret = -EINVAL;
- goto fail;
- }
-
- mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL);
- if (!mdp5_cmd_enc) {
- ret = -ENOMEM;
- goto fail;
- }
-
- memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
- encoder = &mdp5_cmd_enc->base;
- mdp5_cmd_enc->ctl = ctl;
-
- drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
-
- drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
-
- bs_init(mdp5_cmd_enc);
-
- return encoder;
-
-fail:
- if (encoder)
- mdp5_cmd_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 1ce8a01a5a28..d0c8b38b96ce 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -177,6 +177,21 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc)
kfree(mdp5_crtc);
}
+static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
+{
+ switch (stage) {
+ case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
+ case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
+ case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
+ case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
+ case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
+ case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
+ case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
+ default:
+ return 0;
+ }
+}
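+
+/*
+ * Illustrative example (not from this patch): an ARGB overlay staged at
+ * STAGE1 makes blend_setup() below OR in
+ * MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA, so the mixer takes its output
+ * alpha from the stage 1 foreground.
+ */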
+
/*
* blend_setup() - blend all the planes of a CRTC
*
@@ -195,8 +210,10 @@ static void blend_setup(struct drm_crtc *crtc)
uint32_t lm = mdp5_crtc->lm;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
- uint8_t stage[STAGE_MAX + 1];
+ enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
int i, plane_cnt = 0;
+ bool bg_alpha_enabled = false;
+ u32 mixer_op_mode = 0;
#define blender(stage) ((stage) - STAGE0)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@@ -218,6 +235,11 @@ static void blend_setup(struct drm_crtc *crtc)
if (!pstates[STAGE_BASE]) {
ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
DBG("Border Color is enabled");
+ } else if (plane_cnt) {
+ format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
+
+ if (format->alpha_enable)
+ bg_alpha_enabled = true;
}
/* The reset for blending */
@@ -232,6 +254,12 @@ static void blend_setup(struct drm_crtc *crtc)
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
fg_alpha = pstates[i]->alpha;
bg_alpha = 0xFF - pstates[i]->alpha;
+
+ if (!format->alpha_enable && bg_alpha_enabled)
+ mixer_op_mode = 0;
+ else
+ mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);
+
DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
if (format->alpha_enable && pstates[i]->premultiplied) {
@@ -268,6 +296,8 @@ static void blend_setup(struct drm_crtc *crtc)
blender(i)), bg_alpha);
}
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);
+
mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
out:
@@ -370,6 +400,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
+ bool cursor_plane = false;
int cnt = 0, base = 0, i;
DBG("%s: check", crtc->name);
@@ -379,6 +410,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
pstates[cnt].state = to_mdp5_plane_state(pstate);
cnt++;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ cursor_plane = true;
}
/* assign a stage based on sorted zpos property */
@@ -390,6 +424,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
base++;
+ /* trigger a warning if the cursor plane isn't at the highest z-order */
+ WARN_ON(cursor_plane &&
+ (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
+
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
*/
@@ -401,7 +439,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
}
for (i = 0; i < cnt; i++) {
- pstates[i].state->stage = STAGE_BASE + i + base;
+ if (cursor_plane && (i == (cnt - 1)))
+ pstates[i].state->stage = hw_cfg->lm.nb_stages;
+ else
+ pstates[i].state->stage = STAGE_BASE + i + base;
DBG("%s: assign pipe %s on stage=%d", crtc->name,
pstates[i].plane->name,
pstates[i].state->stage);
@@ -612,6 +653,16 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.cursor_move = mdp5_crtc_cursor_move,
};
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_property = drm_atomic_helper_crtc_set_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.disable = mdp5_crtc_disable,
@@ -727,6 +778,13 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
mdp5_ctl_set_pipeline(ctl, intf, lm);
}
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ return mdp5_crtc->ctl;
+}
+
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -745,7 +803,8 @@ void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, int id)
+ struct drm_plane *plane,
+ struct drm_plane *cursor_plane, int id)
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
@@ -766,8 +825,12 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
- NULL);
+ if (cursor_plane)
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+ &mdp5_crtc_no_lm_cursor_funcs, NULL);
+ else
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index d021edc3b307..8b93f7e13200 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -326,6 +326,8 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+ case SSPP_CURSOR0:
+ case SSPP_CURSOR1:
default: return 0;
}
}
@@ -333,7 +335,7 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
enum mdp_mixer_stage_id stage)
{
- if (stage < STAGE6)
+ if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
return 0;
switch (pipe) {
@@ -347,12 +349,14 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+ case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
+ case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
default: return 0;
}
}
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
- u32 ctl_blend_op_flags)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
+ u32 ctl_blend_op_flags)
{
unsigned long flags;
u32 blend_cfg = 0, blend_ext_cfg = 0;
@@ -365,7 +369,7 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
start_stage = STAGE_BASE;
}
- for (i = start_stage; i < start_stage + stage_cnt; i++) {
+ for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
}
@@ -422,6 +426,8 @@ u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+ case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
+ case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
default: return 0;
}
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 96148c6f863c..fda00d33e4db 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -56,8 +56,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
- u32 ctl_blend_op_flags);
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
+ u32 ctl_blend_op_flags);
/**
* mdp_ctl_flush_mask...() - Register FLUSH masks
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index fe0c22230883..80fa482ae8ed 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -21,17 +21,6 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
-struct mdp5_encoder {
- struct drm_encoder base;
- struct mdp5_interface intf;
- spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
- bool enabled;
- uint32_t bsc;
-
- struct mdp5_ctl *ctl;
-};
-#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
-
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
@@ -112,9 +101,9 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = {
.destroy = mdp5_encoder_destroy,
};
-static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
@@ -221,7 +210,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_encoder->ctl);
}
-static void mdp5_encoder_disable(struct drm_encoder *encoder)
+static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
@@ -256,7 +245,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
mdp5_encoder->enabled = false;
}
-static void mdp5_encoder_enable(struct drm_encoder *encoder)
+static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
@@ -279,6 +268,41 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
mdp5_encoder->enabled = true;
}
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
+ else
+ mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode);
+}
+
+static void mdp5_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_disable(encoder);
+ else
+ mdp5_vid_encoder_disable(encoder);
+}
+
+static void mdp5_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_enable(encoder);
+ else
+ mdp5_vid_encoder_enable(encoder);
+}
+
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable,
@@ -303,8 +327,8 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
}
-int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder)
+int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
@@ -342,6 +366,23 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
return 0;
}
+void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = &mdp5_encoder->intf;
+
+ /* TODO: Expand this to set writeback modes too */
+ if (cmd_mode) {
+ WARN_ON(intf->type != INTF_DSI);
+ intf->mode = MDP5_INTF_DSI_MODE_COMMAND;
+ } else {
+ if (intf->type == INTF_DSI)
+ intf->mode = MDP5_INTF_DSI_MODE_VIDEO;
+ else
+ intf->mode = MDP5_INTF_MODE_NONE;
+ }
+}
+
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index c396d459a9d0..3eb0749223d9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -148,7 +148,15 @@ static int mdp5_set_split_display(struct msm_kms *kms,
return mdp5_cmd_encoder_set_split_display(encoder,
slave_encoder);
else
- return mdp5_encoder_set_split_display(encoder, slave_encoder);
+ return mdp5_vid_encoder_set_split_display(encoder,
+ slave_encoder);
+}
+
+static void mdp5_set_encoder_mode(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ bool cmd_mode)
+{
+ mdp5_encoder_set_intf_mode(encoder, cmd_mode);
}
static void mdp5_kms_destroy(struct msm_kms *kms)
@@ -230,6 +238,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
+ .set_encoder_mode = mdp5_set_encoder_mode,
.destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = mdp5_kms_debugfs_init,
@@ -267,7 +276,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
enum mdp5_intf_type intf_type, int intf_num,
- enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
+ struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -275,21 +284,15 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
struct mdp5_interface intf = {
.num = intf_num,
.type = intf_type,
- .mode = intf_mode,
+ .mode = MDP5_INTF_MODE_NONE,
};
- if ((intf_type == INTF_DSI) &&
- (intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
- encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
- else
- encoder = mdp5_encoder_init(dev, &intf, ctl);
-
+ encoder = mdp5_encoder_init(dev, &intf, ctl);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
return encoder;
}
- encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
priv->encoders[priv->num_encoders++] = encoder;
return encoder;
@@ -338,8 +341,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break;
}
- encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
- MDP5_INTF_MODE_NONE, ctl);
+ encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -357,8 +359,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break;
}
- encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
- MDP5_INTF_MODE_NONE, ctl);
+ encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -369,9 +370,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
case INTF_DSI:
{
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
- struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
- enum mdp5_intf_mode mode;
- int i;
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
dev_err(dev->dev, "failed to find dsi from intf %d\n",
@@ -389,19 +387,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break;
}
- for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
- mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
- MDP5_INTF_DSI_MODE_COMMAND :
- MDP5_INTF_DSI_MODE_VIDEO;
- dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
- intf_num, mode, ctl);
- if (IS_ERR(dsi_encs[i])) {
- ret = PTR_ERR(dsi_encs[i]);
- break;
- }
+ encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
}
- ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
break;
}
default:
@@ -418,20 +410,48 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg;
- int i, ret;
+ unsigned int num_crtcs;
+ int i, ret, pi = 0, ci = 0;
+ struct drm_plane *primary[MAX_BASES] = { NULL };
+ struct drm_plane *cursor[MAX_BASES] = { NULL };
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* Construct planes equaling the number of hw pipes, and CRTCs
- * for the N layer-mixers (LM). The first N planes become primary
+ /*
+ * Construct encoders and modeset initialize connector devices
+ * for each external display interface.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
+ ret = modeset_init_intf(mdp5_kms, i);
+ if (ret)
+ goto fail;
+ }
+
+ /*
+ * We should ideally have fewer encoders (set up by parsing the MDP5
+ * interfaces) than layer mixers present in HW, but let's be safe here
+ * anyway.
+ */
+ num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
+
+ /*
+ * Construct planes equaling the number of hw pipes, and CRTCs for the
+ * N encoders set up by the driver. The first N planes become primary
* planes for the CRTCs, with the remainder as overlay planes:
*/
for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
- bool primary = i < mdp5_cfg->lm.count;
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
struct drm_plane *plane;
- struct drm_crtc *crtc;
+ enum drm_plane_type type;
- plane = mdp5_plane_init(dev, primary);
+ if (i < num_crtcs)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ plane = mdp5_plane_init(dev, type);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
@@ -439,10 +459,16 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
}
priv->planes[priv->num_planes++] = plane;
- if (!primary)
- continue;
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ primary[pi++] = plane;
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ cursor[ci++] = plane;
+ }
+
+ for (i = 0; i < num_crtcs; i++) {
+ struct drm_crtc *crtc;
- crtc = mdp5_crtc_init(dev, plane, i);
+ crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
@@ -451,13 +477,14 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->crtcs[priv->num_crtcs++] = crtc;
}
- /* Construct encoders and modeset initialize connector devices
- * for each external display interface.
+ /*
+ * Now that we know the number of crtcs we've created, set the possible
+ * crtcs for the encoders
*/
- for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
- ret = modeset_init_intf(mdp5_kms, i);
- if (ret)
- goto fail;
+ for (i = 0; i < priv->num_encoders; i++) {
+ struct drm_encoder *encoder = priv->encoders[i];
+
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
}
return 0;
@@ -773,6 +800,9 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
static const enum mdp5_pipe dma_planes[] = {
SSPP_DMA0, SSPP_DMA1,
};
+ static const enum mdp5_pipe cursor_planes[] = {
+ SSPP_CURSOR0, SSPP_CURSOR1,
+ };
const struct mdp5_cfg_hw *hw_cfg;
int ret;
@@ -796,6 +826,13 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
if (ret)
return ret;
+ /* Construct cursor pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
+ cursor_planes, hw_cfg->pipe_cursor.base,
+ hw_cfg->pipe_cursor.caps);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index cdfc63d90c7b..9de471191eba 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -126,6 +126,17 @@ struct mdp5_interface {
enum mdp5_intf_mode mode;
};
+struct mdp5_encoder {
+ struct drm_encoder base;
+ struct mdp5_interface intf;
+ spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
+ bool enabled;
+ uint32_t bsc;
+
+ struct mdp5_ctl *ctl;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
msm_writel(data, mdp5_kms->mmio + reg);
@@ -156,6 +167,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
NAME(RGB0), NAME(RGB1), NAME(RGB2),
NAME(DMA0), NAME(DMA1),
NAME(VIG3), NAME(RGB3),
+ NAME(CURSOR0), NAME(CURSOR1),
#undef NAME
};
return names[pipe];
@@ -231,8 +243,10 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
-struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum drm_plane_type type);
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
@@ -240,25 +254,36 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
struct mdp5_interface *intf, struct mdp5_ctl *ctl);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, int id);
+ struct drm_plane *plane,
+ struct drm_plane *cursor_plane, int id);
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl);
-int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder);
+int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode);
int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_MSM_DSI
-struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl);
+void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void mdp5_cmd_encoder_disable(struct drm_encoder *encoder);
+void mdp5_cmd_encoder_enable(struct drm_encoder *encoder);
int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
- struct drm_encoder *slave_encoder);
+ struct drm_encoder *slave_encoder);
#else
-static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+}
+static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{
- return ERR_PTR(-EINVAL);
}
static inline int mdp5_cmd_encoder_set_split_display(
struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
index 1ae9dc8d260d..35c4dabb0c0c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -53,6 +53,14 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
if (caps & ~cur->caps)
continue;
+ /*
+ * don't assign a cursor pipe to a plane that isn't going to
+ * be used as a cursor
+ */
+ if (cur->caps & MDP_PIPE_CAP_CURSOR &&
+ plane->type != DRM_PLANE_TYPE_CURSOR)
+ continue;
+
/* possible candidate, take the one with the
* fewest unneeded caps bits set:
*/
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 25d9d0a97156..0ffb8affef35 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -29,6 +29,11 @@ struct mdp5_plane {
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_rect *src, struct drm_rect *dest);
+
+static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
@@ -45,7 +50,7 @@ static struct mdp5_kms *get_kms(struct drm_plane *plane)
static bool plane_enabled(struct drm_plane_state *state)
{
- return state->fb && state->crtc;
+ return state->visible;
}
static void mdp5_plane_destroy(struct drm_plane *plane)
@@ -246,6 +251,19 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.atomic_print_state = mdp5_plane_atomic_print_state,
};
+static const struct drm_plane_funcs mdp5_cursor_plane_funcs = {
+ .update_plane = mdp5_update_cursor_plane_legacy,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = mdp5_plane_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_set_property = mdp5_plane_atomic_set_property,
+ .atomic_get_property = mdp5_plane_atomic_get_property,
+ .reset = mdp5_plane_reset,
+ .atomic_duplicate_state = mdp5_plane_duplicate_state,
+ .atomic_destroy_state = mdp5_plane_destroy_state,
+ .atomic_print_state = mdp5_plane_atomic_print_state,
+};
+
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -272,15 +290,20 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
msm_framebuffer_cleanup(fb, mdp5_kms->id);
}
-static int mdp5_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *state)
{
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+ struct drm_plane *plane = state->plane;
struct drm_plane_state *old_state = plane->state;
struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
bool new_hwpipe = false;
uint32_t max_width, max_height;
uint32_t caps = 0;
+ struct drm_rect clip;
+ int min_scale, max_scale;
+ int ret;
DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));
@@ -296,6 +319,18 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
return -ERANGE;
}
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
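+ /*
+  * Illustrative Q16.16 arithmetic: FRAC_16_16(1, 8) = 65536 / 8 = 8192
+  * (0.125x) and FRAC_16_16(8, 1) = 8 << 16 = 524288 (8x), i.e. the pipe
+  * may scale between 1/8x and 8x.
+  */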
+
+ ret = drm_plane_helper_check_state(state, &clip, min_scale,
+ max_scale, true, true);
+ if (ret)
+ return ret;
+
if (plane_enabled(state)) {
unsigned int rotation;
const struct mdp_format *format;
@@ -321,6 +356,9 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
if (rotation & DRM_REFLECT_Y)
caps |= MDP_PIPE_CAP_VFLIP;
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ caps |= MDP_PIPE_CAP_CURSOR;
+
/* (re)allocate hw pipe if we don't have one or caps-mismatch: */
if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
new_hwpipe = true;
@@ -356,6 +394,23 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
return 0;
}
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ crtc = state->crtc ? state->crtc : plane->state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ return mdp5_plane_atomic_check_with_state(crtc_state, state);
+}
+
static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -368,10 +423,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
ret = mdp5_plane_mode_set(plane,
state->crtc, state->fb,
- state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- state->src_x, state->src_y,
- state->src_w, state->src_h);
+ &state->src, &state->dst);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
}
@@ -664,10 +716,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+ struct drm_rect *src, struct drm_rect *dest)
{
struct drm_plane_state *pstate = plane->state;
struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
@@ -683,10 +732,14 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t pix_format;
unsigned int rotation;
bool vflip, hflip;
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
unsigned long flags;
int ret;
- nplanes = drm_format_num_planes(fb->pixel_format);
+ nplanes = fb->format->num_planes;
/* bad formats should already be rejected: */
if (WARN_ON(nplanes > pipe2nclients(pipe)))
@@ -695,6 +748,16 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
format = to_mdp_format(msm_framebuffer_format(fb));
pix_format = format->base.pixel_format;
+ src_x = src->x1;
+ src_y = src->y1;
+ src_w = drm_rect_width(src);
+ src_h = drm_rect_height(src);
+
+ crtc_x = dest->x1;
+ crtc_y = dest->y1;
+ crtc_w = drm_rect_width(dest);
+ crtc_h = drm_rect_height(dest);
+
/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;
@@ -818,12 +881,88 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
return ret;
}
+static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_plane_state *plane_state, *new_plane_state;
+ struct mdp5_plane_state *mdp5_pstate;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ int ret;
+
+ if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state))
+ goto slow;
+
+ plane_state = plane->state;
+ mdp5_pstate = to_mdp5_plane_state(plane_state);
+
+ /* don't use fast path if we don't have a hwpipe allocated yet */
+ if (!mdp5_pstate->hwpipe)
+ goto slow;
+
+ /* only allow changing the position (crtc x/y or src x/y) in the fast path */
+ if (plane_state->crtc != crtc ||
+ plane_state->src_w != src_w ||
+ plane_state->src_h != src_h ||
+ plane_state->crtc_w != crtc_w ||
+ plane_state->crtc_h != crtc_h ||
+ !plane_state->fb ||
+ plane_state->fb != fb)
+ goto slow;
+
+ new_plane_state = mdp5_plane_duplicate_state(plane);
+ if (!new_plane_state)
+ return -ENOMEM;
+
+ new_plane_state->src_x = src_x;
+ new_plane_state->src_y = src_y;
+ new_plane_state->src_w = src_w;
+ new_plane_state->src_h = src_h;
+ new_plane_state->crtc_x = crtc_x;
+ new_plane_state->crtc_y = crtc_y;
+ new_plane_state->crtc_w = crtc_w;
+ new_plane_state->crtc_h = crtc_h;
+
+ ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
+ if (ret)
+ goto slow_free;
+
+ if (new_plane_state->visible) {
+ struct mdp5_ctl *ctl;
+
+ ret = mdp5_plane_mode_set(plane, crtc, fb,
+ &new_plane_state->src,
+ &new_plane_state->dst);
+ WARN_ON(ret < 0);
+
+ ctl = mdp5_crtc_get_ctl(crtc);
+
+ mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane));
+ }
+
+ *to_mdp5_plane_state(plane_state) =
+ *to_mdp5_plane_state(new_plane_state);
+
+ mdp5_plane_destroy_state(plane, new_plane_state);
+
+ return 0;
+slow_free:
+ mdp5_plane_destroy_state(plane, new_plane_state);
+slow:
+ return drm_atomic_helper_update_plane(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
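+
+/*
+ * Illustrative flow (an assumption, not spelled out in this patch): a
+ * legacy cursor ioctl that only moves the cursor (crtc_x/y or src_x/y)
+ * takes the fast path above (mode_set plus a CTL flush, no full atomic
+ * commit); a new fb, a resize, or a missing hwpipe falls back to
+ * drm_atomic_helper_update_plane().
+ */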
+
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
if (WARN_ON(!pstate->hwpipe))
- return 0;
+ return SSPP_NONE;
return pstate->hwpipe->pipe;
}
@@ -839,12 +978,12 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
}
/* initialize plane */
-struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum drm_plane_type type)
{
struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane;
int ret;
- enum drm_plane_type type;
mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
if (!mdp5_plane) {
@@ -857,10 +996,16 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);
- type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
- mdp5_plane->formats, mdp5_plane->nformats,
- type, NULL);
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ ret = drm_universal_plane_init(dev, plane, 0xff,
+ &mdp5_cursor_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ type, NULL);
+ else
+ ret = drm_universal_plane_init(dev, plane, 0xff,
+ &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ type, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 303130320748..7574cdfef418 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -112,6 +112,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#define MDP_PIPE_CAP_CSC BIT(3)
#define MDP_PIPE_CAP_DECIMATION BIT(4)
#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
+#define MDP_PIPE_CAP_CURSOR BIT(6)
static inline bool pipe_supports_yuv(uint32_t pipe_caps)
{
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 30b5d23e53b4..9633a68b14d7 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -93,11 +93,6 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (!crtc->state->enable)
continue;
- /* Legacy cursor ioctls are completely unsynced, and userspace
- * relies on that (by doing tons of cursor updates). */
- if (old_state->legacy_cursor_update)
- continue;
-
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
}
}
@@ -151,20 +146,29 @@ static void commit_worker(struct work_struct *work)
complete_commit(container_of(work, struct msm_commit, work), true);
}
+/*
+ * This function is identical to drm_atomic_helper_check, but we keep it
+ * because we might eventually need a more fine-grained check sequence
+ * without using the atomic helpers.
+ *
+ * In the past, we first called drm_atomic_helper_check_planes, and then
+ * drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's
+ * ->atomic_check could update ->mode_changed for pixel format changes.
+ * This, however, isn't needed now because if there is a pixel format change,
+ * we just assign a new hwpipe for it with a new SMP allocation. We might
+ * eventually hit a condition where we would need to do a full modeset if
+ * we run out of planes. There, we'd probably need to set mode_changed.
+ */
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
- /*
- * msm ->atomic_check can update ->mode_changed for pixel format
- * changes, hence must be run before we check the modeset changes.
- */
- ret = drm_atomic_helper_check_planes(dev, state);
+ ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
- ret = drm_atomic_helper_check_modeset(dev, state);
+ ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index c1b40f5adb60..387f0616e115 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -52,7 +52,11 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
- return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
+
+ return 0;
}
static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e29bb66f55b1..70226eaa5cac 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -91,6 +91,25 @@ module_param(dumpstate, bool, 0600);
* Util/helpers:
*/
+struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
+{
+ struct clk *clk;
+ char name2[32];
+
+ clk = devm_clk_get(&pdev->dev, name);
+ if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ return clk;
+
+ snprintf(name2, sizeof(name2), "%s_clk", name);
+
+ clk = devm_clk_get(&pdev->dev, name2);
+ if (!IS_ERR(clk))
+ dev_warn(&pdev->dev, "Using legacy clk name binding. Use "
+ "\"%s\" instead of \"%s\"\n", name, name2);
+
+ return clk;
+}
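+
+/*
+ * Hypothetical caller sketch (the "iface" clock name is illustrative):
+ *
+ *	struct clk *iface = msm_clk_get(pdev, "iface");
+ *
+ *	if (IS_ERR(iface))
+ *		return PTR_ERR(iface);
+ *
+ * -EPROBE_DEFER from the canonical name is returned as-is; other errors
+ * trigger the legacy "<name>_clk" lookup, which warns if it succeeds.
+ */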
+
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname)
{
@@ -985,6 +1004,7 @@ static int add_display_components(struct device *dev,
* as components.
*/
static const struct of_device_id msm_gpu_match[] = {
+ { .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
{ .compatible = "qcom,kgsl-3d0" },
{ },
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index ed4dad3ca133..cdd7b2f8e977 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -275,16 +275,11 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder);
struct msm_dsi;
-enum msm_dsi_encoder_id {
- MSM_DSI_VIDEO_ENCODER_ID = 0,
- MSM_DSI_CMD_ENCODER_ID = 1,
- MSM_DSI_ENCODER_NUM = 2
-};
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
- struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]);
+ struct drm_encoder *encoder);
#else
static inline void __init msm_dsi_register(void)
{
@@ -293,8 +288,8 @@ static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
- struct drm_device *dev,
- struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+ struct drm_device *dev,
+ struct drm_encoder *encoder)
{
return -EINVAL;
}
@@ -318,6 +313,7 @@ static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
#endif
+struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 9acf544e7a8f..5cf165c9c3a9 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -41,7 +41,7 @@ static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = fb->format->num_planes;
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
@@ -65,10 +65,10 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
- fb->width, fb->height, (char *)&fb->pixel_format,
+ fb->width, fb->height, (char *)&fb->format->format,
drm_framebuffer_read_refcount(fb), fb->base.id);
for (i = 0; i < n; i++) {
@@ -87,7 +87,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ int ret, i, n = fb->format->num_planes;
uint64_t iova;
for (i = 0; i < n; i++) {
@@ -103,7 +103,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
msm_gem_put_iova(msm_fb->planes[i], id);
@@ -217,7 +217,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->planes[i] = bos[i];
}
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
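
These msm_fb.c hunks track the core change that cached format metadata on the framebuffer: fb->pixel_format is replaced by fb->format (a const struct drm_format_info *), so plane counts no longer need a drm_format_num_planes() lookup, and drm_helper_mode_fill_fb_struct() grew a drm_device argument. The pattern, sketched:

        const struct drm_format_info *info = fb->format;
        int n = info->num_planes;       /* was: drm_format_num_planes(fb->pixel_format) */
        u32 fourcc = info->format;      /* was: fb->pixel_format */
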
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index bffe93498512..6b1b375653f7 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -148,7 +148,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
strcpy(fbi->fix.id, "msm");
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
dev->mode_config.fb_base = paddr;
@@ -174,10 +174,8 @@ fail_unlock:
fail:
if (ret) {
- if (fb) {
- drm_framebuffer_unregister_private(fb);
+ if (fb)
drm_framebuffer_remove(fb);
- }
}
return ret;
@@ -203,8 +201,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper,
- priv->num_crtcs, priv->num_connectors);
+ ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
if (ret) {
dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
@@ -247,7 +244,6 @@ void msm_fbdev_free(struct drm_device *dev)
/* this will free the backing object */
if (fbdev->fb) {
msm_gem_put_vaddr(fbdev->bo);
- drm_framebuffer_unregister_private(fbdev->fb);
drm_framebuffer_remove(fbdev->fb);
}
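
Two fbdev-side API updates land here: drm_fb_helper_init() dropped its crtc-count argument, and drm_framebuffer_remove() now covers what the removed drm_framebuffer_unregister_private() used to do, so the paired call disappears. Sketched:

        /* setup: crtc count no longer passed */
        ret = drm_fb_helper_init(dev, helper, priv->num_connectors);

        /* teardown: one call; the private-unregister step is implied */
        drm_framebuffer_remove(fb);
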
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 1974ccb781de..e140b05af134 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -54,8 +54,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
if (!p)
return ERR_PTR(-ENOMEM);
- ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
- npages, 0, DRM_MM_SEARCH_DEFAULT);
+ ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
if (ret) {
drm_free_large(p);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 489676568a10..1172fe7a9252 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -95,13 +95,13 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
submit->bos[i].flags = 0;
- ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
- if (unlikely(ret)) {
+ if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
pagefault_enable();
spin_unlock(&file->table_lock);
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret)
+ if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
+ ret = -EFAULT;
goto out;
+ }
spin_lock(&file->table_lock);
pagefault_disable();
}
@@ -317,9 +317,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint64_t iova;
bool valid;
- ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
- if (ret)
+ if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
+ ret = -EFAULT;
goto out;
+ }
if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n",
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index a311d26ccb21..b654eca7636a 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -45,8 +45,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
if (WARN_ON(drm_mm_node_allocated(&vma->node)))
return 0;
- ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
- 0, DRM_MM_SEARCH_DEFAULT);
+ ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
if (ret)
return ret;
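
This hunk and the msm_gem.c change above both follow the drm_mm rework in this cycle: the search-flags variant is gone and plain drm_mm_insert_node() takes only the node and size, with alignment and colour moving to drm_mm_insert_node_generic() for callers that still need them. A self-contained sketch:

        struct drm_mm mm;
        struct drm_mm_node node = {};
        int ret;

        drm_mm_init(&mm, 0, 4096);                 /* manage a 4096-unit range */
        ret = drm_mm_insert_node(&mm, &node, 16);  /* size only; flags are gone */
        if (!ret)
                drm_mm_remove_node(&node);
        drm_mm_takedown(&mm);
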
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index b28527a65d09..99e05aacbee1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -560,8 +560,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
static const char *clk_names[] = {
- "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk",
- "mem_iface_clk", "alt_mem_iface_clk",
+ "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
};
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -625,13 +624,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Acquire clocks: */
for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+ gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
if (IS_ERR(gpu->grp_clks[i]))
gpu->grp_clks[i] = NULL;
}
- gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ gpu->ebi1_clk = msm_clk_get(pdev, "bus");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
if (IS_ERR(gpu->ebi1_clk))
gpu->ebi1_clk = NULL;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 61aaaa1de6eb..7f5779daf5c8 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -24,9 +24,12 @@ struct msm_iommu {
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
-static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
+ struct msm_iommu *iommu = arg;
+ if (iommu->base.handler)
+ return iommu->base.handler(iommu->base.arg, iova, flags);
pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
return 0;
}
@@ -136,7 +139,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs);
- iommu_set_fault_handler(domain, msm_fault_handler, dev);
+ iommu_set_fault_handler(domain, msm_fault_handler, iommu);
return &iommu->base;
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e470f4cf8f76..117635d2b8c5 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -56,6 +56,9 @@ struct msm_kms_funcs {
struct drm_encoder *encoder,
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
+ void (*set_encoder_mode)(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ bool cmd_mode);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
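
The new set_encoder_mode hook gives the DSI code a way to tell the KMS backend, after the single-encoder change above, whether an encoder should run in command or video mode. A hypothetical backend implementation (function name assumed):

        static void mdp5_set_encoder_mode(struct msm_kms *kms,
                                          struct drm_encoder *encoder,
                                          bool cmd_mode)
        {
                /* reconfigure the interface for command vs. video mode */
        }
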
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index f85c879e68d2..aa2c5d4580c8 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -33,6 +33,8 @@ struct msm_mmu_funcs {
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
+ int (*handler)(void *arg, unsigned long iova, int flags);
+ void *arg;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@@ -45,4 +47,11 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
+ int (*handler)(void *arg, unsigned long iova, int flags))
+{
+ mmu->arg = arg;
+ mmu->handler = handler;
+}
+
#endif /* __MSM_MMU_H__ */
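
With the handler/arg pair stored on struct msm_mmu, and msm_iommu.c now registering the msm_iommu itself (rather than the struct device) as the fault-handler cookie, a consumer can hook per-address-space faults. A hypothetical GPU-side registration, sketched:

        static int gpu_fault(void *arg, unsigned long iova, int flags)
        {
                struct msm_gpu *gpu = arg;      /* hypothetical consumer */

                dev_err(gpu->dev->dev, "GPU fault at iova %08lx\n", iova);
                return 0;
        }

        /* at address-space setup time: */
        msm_mmu_set_fault_handler(mmu, gpu, gpu_fault);
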
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 081890336ce7..e10a4eda4078 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -46,7 +46,7 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
{
struct drm_crtc *crtc = &mxsfb->pipe.crtc;
struct drm_device *drm = crtc->dev;
- const u32 format = crtc->primary->state->fb->pixel_format;
+ const u32 format = crtc->primary->state->fb->format->format;
u32 ctrl, ctrl1;
ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 79a18bf48b54..cdfbe0284635 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -218,7 +218,7 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
drm_kms_helper_poll_init(drm);
- mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ mxsfb->fbdev = drm_fbdev_cma_init(drm, 32,
drm->mode_config.num_connector);
if (IS_ERR(mxsfb->fbdev)) {
mxsfb->fbdev = NULL;
@@ -395,8 +395,8 @@ static int mxsfb_probe(struct platform_device *pdev)
pdev->id_entry = of_id->data;
drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
ret = mxsfb_load(drm, 0);
if (ret)
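
The probe fix above is more than cosmetic: drm_dev_alloc() returns an ERR_PTR on failure, never NULL, so the old check could not fire and the real errno was discarded. The corrected pattern:

        drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);    /* a NULL check would never trigger */
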
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 2922a82cba8e..c02a13406a81 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,6 +1,6 @@
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
@@ -16,6 +16,7 @@ config DRM_NOUVEAU
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
+ select DRM_VM
help
Choose this option for open-source NVIDIA support.
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index a555681c3096..90075b676256 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,7 +198,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nv_fifo_info fifo_data;
struct nv_sim_state sim_data;
int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
@@ -227,7 +227,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
}
- if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT)
nv04_calc_arb(&fifo_data, &sim_data);
else
nv10_calc_arb(&fifo_data, &sim_data);
@@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN)
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
(dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
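
From this point the bulk of the nouveau hunks are one mechanical rename: the nvif_device moved from struct nouveau_drm itself into the embedded DRM client, so every drm->device access becomes drm->client.device, and the nvxx_*() accessors take the new path. The same substitution repeats through the remaining dispnv04 files. Expressed as an accessor under the assumed new layout:

        static inline struct nvif_device *
        nouveau_nvif_device(struct nouveau_drm *drm)
        {
                return &drm->client.device;     /* was: &drm->device */
        }
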
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 59d1d1c5de5f..ab7b69c11d40 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -113,8 +113,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->device);
- struct nvkm_clk *clk = nvxx_clk(&drm->device);
+ struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -138,7 +138,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
* has yet been observed in allowing the use of a single stage pll on all
* nv43 however. the behaviour of single stage use is untested on nv40
*/
- if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
+ if (drm->client.device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
@@ -148,10 +148,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
/* The blob uses this always, so let's do the same */
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
/* again nv40 and some nv43 act more like nv3x as described above */
- if (drm->device.info.chipset < 0x41)
+ if (drm->client.device.info.chipset < 0x41)
state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
@@ -270,7 +270,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
horizEnd = horizTotal - 2;
horizBlankEnd = horizTotal + 4;
#if 0
- if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+ if (dev->overlayAdaptor && drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
/* This reportedly works around some video overlay bandwidth problems */
horizTotal += 2;
#endif
@@ -460,6 +460,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
+ const struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_encoder *encoder;
bool lvds_output = false, tmds_output = false, tv_output = false,
off_chip_digital = false;
@@ -504,7 +505,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
- if (drm->device.info.chipset >= 0x11)
+ if (drm->client.device.info.chipset >= 0x11)
regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -545,47 +546,47 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
* 1 << 30 on 0x60.830), for no apparent reason */
regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
regp->crtc_830 = mode->crtc_vdisplay - 3;
regp->crtc_834 = mode->crtc_vdisplay - 1;
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
/* This is what the blob does */
regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
else
regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
/* Some misc regs */
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->CRTC[NV_CIO_CRE_85] = 0xFF;
regp->CRTC[NV_CIO_CRE_86] = 0x1;
}
- regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->primary->fb->depth + 1) / 8;
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (fb->format->depth + 1) / 8;
/* Enable slaved mode (called MODE_TV in nv4ref.h) */
if (lvds_output || tmds_output || tv_output)
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
/* Generic PRAMDAC regs */
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
/* Only bit that bios and blob set. */
regp->nv10_cursync = (1 << 25);
regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
- if (crtc->primary->fb->depth == 16)
+ if (fb->format->depth == 16)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
- if (drm->device.info.chipset >= 0x11)
+ if (drm->client.device.info.chipset >= 0x11)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -648,7 +649,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc_mode_set_vga(crtc, adjusted_mode);
/* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
nv_crtc_mode_set_regs(crtc, adjusted_mode);
nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
@@ -709,7 +710,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
/* Some more preparation. */
NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
}
@@ -847,16 +848,16 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
nv_crtc->fb.offset = fb->nvbo->bo.offset;
- if (nv_crtc->lut.depth != drm_fb->depth) {
- nv_crtc->lut.depth = drm_fb->depth;
+ if (nv_crtc->lut.depth != drm_fb->format->depth) {
+ nv_crtc->lut.depth = drm_fb->format->depth;
nv_crtc_gamma_load(crtc);
}
/* Update the framebuffer format. */
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
- regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->primary->fb->depth + 1) / 8;
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (drm_fb->format->depth + 1) / 8;
regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
- if (crtc->primary->fb->depth == 16)
+ if (drm_fb->format->depth == 16)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
@@ -873,11 +874,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
- regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
+ regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->format->cpp[0]);
nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
- nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
+ nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->format->cpp[0] * 8,
&arb_burst, &arb_lwm);
regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
@@ -885,7 +886,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
}
@@ -966,7 +967,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->device.info.chipset == 0x11) {
+ if (drm->client.device.info.chipset == 0x11) {
pixel = ((pixel & 0x000000ff) << 24) |
((pixel & 0x0000ff00) << 8) |
((pixel & 0x00ff0000) >> 8) |
@@ -1007,7 +1008,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
if (ret)
goto out;
- if (drm->device.info.chipset >= 0x11)
+ if (drm->client.device.info.chipset >= 0x11)
nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
else
nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
@@ -1123,8 +1124,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
+ ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
+ TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL,
+ &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
if (!ret) {
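
Besides the client.device rename, this file picks up two framebuffer conversions from the same series: fb->depth becomes fb->format->depth, and fb->bits_per_pixel becomes fb->format->cpp[0] (bytes per pixel), scaled by 8 where a bit count is still expected; nouveau_bo_new() also gains the client as its first argument. The pixel math, sketched:

        u32 start = y * fb->pitches[0] + x * fb->format->cpp[0];  /* bytes, was bpp/8 */
        int bpp = fb->format->cpp[0] * 8;                         /* when bits are needed */
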
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index c83116a308a4..f26e44ea7389 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index b6cc7766e6f7..4feab0a5419d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -66,7 +66,7 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
static int sample_load_twice(struct drm_device *dev, bool sense[2])
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
int i;
for (i = 0; i < 2; i++) {
@@ -80,19 +80,19 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
* use a 10ms timeout (guards against crtc being inactive, in
* which case blank state would never change)
*/
- if (nvif_msec(&drm->device, 10,
+ if (nvif_msec(&drm->client.device, 10,
if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
break;
) < 0)
return -EBUSY;
- if (nvif_msec(&drm->device, 10,
+ if (nvif_msec(&drm->client.device, 10,
if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
break;
) < 0)
return -EBUSY;
- if (nvif_msec(&drm->device, 10,
+ if (nvif_msec(&drm->client.device, 10,
if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
break;
) < 0)
@@ -133,7 +133,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
@@ -236,8 +236,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -288,7 +288,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
routput = (saved_routput & 0xfffffece) | head << 8;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
if (dcb->type == DCB_OUTPUT_TV)
routput |= 0x1a << 16;
else
@@ -403,7 +403,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
}
/* This could use refinement for flatpanels, but it should work this way */
- if (drm->device.info.chipset < 0x44)
+ if (drm->client.device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index c2947ef7d4fc..9805d2cdc1a1 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -290,6 +290,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *output_mode = &nv_encoder->mode;
struct drm_connector *connector = &nv_connector->base;
+ const struct drm_framebuffer *fb = encoder->crtc->primary->fb;
uint32_t mode_ratio, panel_ratio;
NV_DEBUG(drm, "Output mode on CRTC %d:\n", nv_crtc->index);
@@ -415,8 +416,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
/* Output property. */
if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
(nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
- encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) {
- if (drm->device.info.chipset == 0x11)
+ fb->format->depth > connector->display_info.bpc * 3)) {
+ if (drm->client.device.info.chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
int i;
@@ -427,7 +428,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
}
}
} else {
- if (drm->device.info.chipset != 0x11) {
+ if (drm->client.device.info.chipset != 0x11) {
/* reset them */
int i;
for (i = 0; i < 3; i++) {
@@ -463,7 +464,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
/* This could use refinement for flatpanels, but it should work this way */
- if (drm->device.info.chipset < 0x44)
+ if (drm->client.device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -485,7 +486,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
{
#ifdef __powerpc__
struct drm_device *dev = encoder->dev;
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
@@ -623,7 +624,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
struct nvkm_i2c_bus_probe info[] = {
{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 34c0f2f67548..5b9d549aa791 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -35,7 +35,7 @@ int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
- nvif_object_map(&drm->device.object);
+ nvif_object_map(&drm->client.device.object);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
@@ -139,7 +139,7 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_display(dev)->priv = NULL;
kfree(disp);
- nvif_object_unmap(&drm->device.object);
+ nvif_object_unmap(&drm->client.device.object);
}
int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 7030307d2d48..bea4543554ba 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -129,7 +129,7 @@ nv_two_heads(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
const int impl = dev->pdev->device & 0x0ff0;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
return true;
@@ -148,7 +148,7 @@ nv_two_reg_pll(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
const int impl = dev->pdev->device & 0x0ff0;
- if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE)
+ if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE)
return true;
return false;
}
@@ -170,7 +170,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
struct dcb_output *outp, int crtc)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->device);
+ struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvbios_init init = {
.subdev = &bios->subdev,
.bios = bios,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index e64f52464ecf..b98599002831 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -89,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner)
if (owner == 1)
owner *= 3;
- if (drm->device.info.chipset == 0x11) {
+ if (drm->client.device.info.chipset == 0x11) {
/* This might seem stupid, but the blob does it and
* omitting it often locks the system up.
*/
@@ -100,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner)
/* CR44 is always changed on CRTC0 */
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
- if (drm->device.info.chipset == 0x11) { /* set me harder */
+ if (drm->client.device.info.chipset == 0x11) { /* set me harder */
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
}
@@ -149,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
pllvals->NM1 = pll1 & 0xffff;
if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
pllvals->NM2 = pll2 & 0xffff;
- else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) {
+ else if (drm->client.device.info.chipset == 0x30 || drm->client.device.info.chipset == 0x35) {
pllvals->M1 &= 0xf; /* only 4 bits */
if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -165,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
struct nvkm_pll_vals *pllvals)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &drm->device.object;
- struct nvkm_bios *bios = nvxx_bios(&drm->device);
+ struct nvif_object *device = &drm->client.device.object;
+ struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
@@ -184,7 +184,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
pll2 = nvif_rd32(device, reg2);
}
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
/* check whether vpll has been forced into single stage mode */
@@ -253,7 +253,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
struct nvkm_clk *clk = nvxx_clk(device);
struct nvkm_bios *bios = nvxx_bios(device);
struct nvbios_pll pll_lim;
@@ -392,21 +392,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
int i;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
if (nv_two_heads(dev))
state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
- if (drm->device.info.chipset == 0x11)
+ if (drm->client.device.info.chipset == 0x11)
regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
if (nv_gf4_disp_arch(dev))
regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
- if (drm->device.info.chipset >= 0x30)
+ if (drm->client.device.info.chipset >= 0x30)
regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -448,7 +448,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
if (nv_gf4_disp_arch(dev))
regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -464,26 +464,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_clk *clk = nvxx_clk(&drm->device);
+ struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
int i;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
clk->pll_prog(clk, pllreg, &regp->pllvals);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
if (nv_two_heads(dev))
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
- if (drm->device.info.chipset == 0x11)
+ if (drm->client.device.info.chipset == 0x11)
NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
- if (drm->device.info.chipset >= 0x30)
+ if (drm->client.device.info.chipset >= 0x30)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -520,7 +520,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -601,10 +601,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
rd_cio_state(dev, head, regp, 0x9f);
rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -613,14 +613,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
if (nv_two_heads(dev))
@@ -632,7 +632,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -661,12 +661,12 @@ nv_load_state_ext(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t reg900;
int i;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
if (nv_two_heads(dev))
/* setting ENGINE_CTRL (EC) *must* come before
* CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -678,20 +678,20 @@ nv_load_state_ext(struct drm_device *dev, int head,
nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
- nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1);
- nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1);
- nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1);
- nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->client.device.info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->client.device.info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->client.device.info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->client.device.info.ram_size - 1);
nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -714,23 +714,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
wr_cio_state(dev, head, regp, 0x9f);
wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, head);
wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -738,14 +738,14 @@ nv_load_state_ext(struct drm_device *dev, int head,
}
/* NV11 and NV20 stop at 0x52. */
if (nv_gf4_disp_arch(dev)) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
- nvif_msec(&drm->device, 650,
+ nvif_msec(&drm->client.device, 650,
if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
break;
);
- nvif_msec(&drm->device, 650,
+ nvif_msec(&drm->client.device, 650,
if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
break;
);
@@ -771,7 +771,7 @@ static void
nv_save_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
int head_offset = head * NV_PRMDIO_SIZE, i;
nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -790,7 +790,7 @@ void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
int head_offset = head * NV_PRMDIO_SIZE, i;
nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -810,7 +810,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head,
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->device.info.chipset == 0x11)
+ if (drm->client.device.info.chipset == 0x11)
/* NB: no attempt is made to restore the bad pll later on */
nouveau_hw_fix_bad_vpll(dev, head);
nv_save_state_ramdac(dev, head, state);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 3bded60c5596..3a2be47fb4f1 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,7 +60,7 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
static inline uint32_t NVReadCRTC(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
uint32_t val;
if (head)
reg += NV_PCRTC0_SIZE;
@@ -71,7 +71,7 @@ static inline uint32_t NVReadCRTC(struct drm_device *dev,
static inline void NVWriteCRTC(struct drm_device *dev,
int head, uint32_t reg, uint32_t val)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
if (head)
reg += NV_PCRTC0_SIZE;
nvif_wr32(device, reg, val);
@@ -80,7 +80,7 @@ static inline void NVWriteCRTC(struct drm_device *dev,
static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
uint32_t val;
if (head)
reg += NV_PRAMDAC0_SIZE;
@@ -91,7 +91,7 @@ static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
static inline void NVWriteRAMDAC(struct drm_device *dev,
int head, uint32_t reg, uint32_t val)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
if (head)
reg += NV_PRAMDAC0_SIZE;
nvif_wr32(device, reg, val);
@@ -120,7 +120,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
static inline void NVWriteVgaCrtc(struct drm_device *dev,
int head, uint8_t index, uint8_t value)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
}
@@ -128,7 +128,7 @@ static inline void NVWriteVgaCrtc(struct drm_device *dev,
static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
int head, uint8_t index)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
uint8_t val;
nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
@@ -165,13 +165,13 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t val;
/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
* NVSetOwner for the relevant head to be programmed */
- if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
reg += NV_PRMVIO_SIZE;
val = nvif_rd08(device, reg);
@@ -181,12 +181,12 @@ static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
static inline void NVWritePRMVIO(struct drm_device *dev,
int head, uint32_t reg, uint8_t value)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
* NVSetOwner for the relevant head to be programmed */
- if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
reg += NV_PRMVIO_SIZE;
nvif_wr08(device, reg, value);
@@ -194,14 +194,14 @@ static inline void NVWritePRMVIO(struct drm_device *dev,
static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
}
static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
}
@@ -209,7 +209,7 @@ static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
static inline void NVWriteVgaAttr(struct drm_device *dev,
int head, uint8_t index, uint8_t value)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
if (NVGetEnablePalette(dev, head))
index &= ~0x20;
else
@@ -223,7 +223,7 @@ static inline void NVWriteVgaAttr(struct drm_device *dev,
static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
int head, uint8_t index)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
uint8_t val;
if (NVGetEnablePalette(dev, head))
index &= ~0x20;
@@ -259,10 +259,10 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
static inline bool
nv_heads_tied(struct drm_device *dev)
{
- struct nvif_object *device = &nouveau_drm(dev)->device.object;
+ struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->device.info.chipset == 0x11)
+ if (drm->client.device.info.chipset == 0x11)
return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
@@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
/* NV11 has independently lockable extended crtcs, except when tied */
- if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev))
+ if (drm->client.device.info.chipset == 0x11 && !nv_heads_tied(dev))
NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
lock ? NV_CIO_SR_LOCK_VALUE :
NV_CIO_SR_UNLOCK_RW_VALUE);
@@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+ return drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
}
static inline void
@@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) {
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) {
/*
* Hilarious, the 24th bit doesn't want to stick to
* PCRTC_START...
@@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, head);
}
@@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
bpp = 8;
/* Alignment requirements taken from the Haiku driver */
- if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT)
mask = 128 / bpp - 1;
else
mask = 512 / bpp - 1;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index a79514d440b3..5319f2a7f24d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -97,7 +97,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct nouveau_drm *drm = nouveau_drm(plane->dev);
- struct nvif_object *dev = &drm->device.object;
+ struct nvif_object *dev = &drm->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -119,7 +119,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (format > 0xffff)
return -ERANGE;
- if (drm->device.info.chipset >= 0x30) {
+ if (drm->client.device.info.chipset >= 0x30) {
if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
return -ERANGE;
} else {
@@ -145,16 +145,16 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
- if (fb->pixel_format != DRM_FORMAT_UYVY)
+ if (fb->format->format != DRM_FORMAT_UYVY)
format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
- if (fb->pixel_format == DRM_FORMAT_NV12)
+ if (fb->format->format == DRM_FORMAT_NV12)
format |= NV_PVIDEO_FORMAT_PLANAR;
if (nv_plane->iturbt_709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (nv_plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
- if (fb->pixel_format == DRM_FORMAT_NV12) {
+ if (fb->format->format == DRM_FORMAT_NV12) {
nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
nv_fb->nvbo->bo.offset + fb->offsets[1]);
@@ -174,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
static int
nv10_disable_plane(struct drm_plane *plane)
{
- struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+ struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
@@ -198,7 +198,7 @@ nv_destroy_plane(struct drm_plane *plane)
static void
nv10_set_params(struct nouveau_plane *plane)
{
- struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object;
+ struct nvif_object *dev = &nouveau_drm(plane->base.dev)->client.device.object;
u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
(cos_mul(plane->hue, plane->saturation) & 0xffff);
@@ -268,7 +268,7 @@ nv10_overlay_init(struct drm_device *device)
if (!plane)
return;
- switch (drm->device.info.chipset) {
+ switch (drm->client.device.info.chipset) {
case 0x10:
case 0x11:
case 0x15:
@@ -347,7 +347,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+ struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -411,7 +411,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (nv_plane->colorkey & (1 << 24))
overlay |= 0x10;
- if (fb->pixel_format == DRM_FORMAT_YUYV)
+ if (fb->format->format == DRM_FORMAT_YUYV)
overlay |= 0x100;
nvif_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
@@ -427,7 +427,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
static int
nv04_disable_plane(struct drm_plane *plane)
{
- struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+ struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
@@ -495,7 +495,7 @@ err:
void
nouveau_overlay_init(struct drm_device *device)
{
- struct nvif_device *dev = &nouveau_drm(device)->device;
+ struct nvif_device *dev = &nouveau_drm(device)->client.device;
if (dev->info.chipset < 0x10)
nv04_overlay_init(device);
else if (dev->info.chipset <= 0x40)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 477a8d072af4..01664357d3e1 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -54,7 +54,7 @@ static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
if (bus) {
return nvkm_i2c_bus_probe(bus, "TV encoder",
@@ -206,7 +206,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
int type, ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 434d1e29f279..6d99f11fee4e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -46,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -130,7 +130,7 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
if (device->quirk && device->quirk->tv_pin_mask) {
*pin_mask = device->quirk->tv_pin_mask;
@@ -154,8 +154,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
return connector_status_disconnected;
if (reliable) {
- if (drm->device.info.chipset == 0x42 ||
- drm->device.info.chipset == 0x43)
+ if (drm->client.device.info.chipset == 0x42 ||
+ drm->client.device.info.chipset == 0x43)
tv_enc->pin_mask =
nv42_tv_sample_load(encoder) >> 28 & 0xe;
else
@@ -362,7 +362,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -435,7 +435,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
/* Set the DACCLK register */
dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
dacclk |= 0x1a << 16;
if (tv_norm->kind == CTV_ENC_MODE) {
@@ -492,7 +492,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
tv_regs->ptv_614 = 0x13;
}
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
tv_regs->ptv_500 = 0xe8e0;
tv_regs->ptv_504 = 0x1710;
tv_regs->ptv_604 = 0x0;
@@ -587,7 +587,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
/* This could use refinement for flatpanels, but it should work */
- if (drm->device.info.chipset < 0x44)
+ if (drm->client.device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
nv04_dac_output_offset(encoder),
0xf0000000);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 1b07521cde0d..29773b325bd9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -130,13 +130,13 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_device *device = &nouveau_drm(dev)->client.device;
nvif_wr32(&device->object, reg, val);
}
static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_device *device = &nouveau_drm(dev)->client.device;
return nvif_rd32(&device->object, reg);
}
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
index 05e6ef7cd190..91e33db21a2f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -10,5 +10,5 @@ struct g82_channel_dma_v0 {
__u64 offset;
};
-#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
+#define NV826E_V0_NTFY_NON_STALL_INTERRUPT 0x00
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
index cecafcb1e954..e34efd4ec537 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -11,5 +11,5 @@ struct g82_channel_gpfifo_v0 {
__u64 vm;
};
-#define G82_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00
+#define NV826F_V0_NTFY_NON_STALL_INTERRUPT 0x00
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
index 2caf0838fcfd..a2d5410a491b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -10,5 +10,6 @@ struct fermi_channel_gpfifo_v0 {
__u64 vm;
};
-#define FERMI_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00
+#define NV906F_V0_NTFY_NON_STALL_INTERRUPT 0x00
+#define NV906F_V0_NTFY_KILLED 0x01
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 46301ec018ce..2efa3d048bb9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -25,5 +25,6 @@ struct kepler_channel_gpfifo_a_v0 {
__u64 vm;
};
-#define NVA06F_V0_NTFY_UEVENT 0x00
+#define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00
+#define NVA06F_V0_NTFY_KILLED 0x01
#endif
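
Taken together, the four cl*.h hunks above rename the per-class notifier indices from *_NTFY_UEVENT to *_V0_NTFY_NON_STALL_INTERRUPT and, on the Fermi and Kepler gpfifo classes, add a second index for channel-death notifications (which pairs with the kevent added to nvkm_fifo further down). A hedged sketch of how a caller would pick an index after the rename; the notifier-registration API itself is untouched by these hunks:

    #include <nvif/cla06f.h>

    /* hypothetical helper: choose the Kepler gpfifo notify index */
    static int chan_ntfy_index(bool want_killed)
    {
    	return want_killed ? NVA06F_V0_NTFY_KILLED
    			   : NVA06F_V0_NTFY_NON_STALL_INTERRUPT;
    }
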
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 82235f30277c..3a2c0137d4b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -2,23 +2,31 @@
#define __NVIF_CLASS_H__
/* these class numbers are made up by us, and not nvidia-assigned */
-#define NVIF_CLASS_CONTROL /* if0001.h */ -1
-#define NVIF_CLASS_PERFMON /* if0002.h */ -2
-#define NVIF_CLASS_PERFDOM /* if0003.h */ -3
-#define NVIF_CLASS_SW_NV04 /* if0004.h */ -4
-#define NVIF_CLASS_SW_NV10 /* if0005.h */ -5
-#define NVIF_CLASS_SW_NV50 /* if0005.h */ -6
-#define NVIF_CLASS_SW_GF100 /* if0005.h */ -7
+#define NVIF_CLASS_CLIENT /* if0000.h */ -0x00000000
+
+#define NVIF_CLASS_CONTROL /* if0001.h */ -0x00000001
+
+#define NVIF_CLASS_PERFMON /* if0002.h */ -0x00000002
+#define NVIF_CLASS_PERFDOM /* if0003.h */ -0x00000003
+
+#define NVIF_CLASS_SW_NV04 /* if0004.h */ -0x00000004
+#define NVIF_CLASS_SW_NV10 /* if0005.h */ -0x00000005
+#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
+#define NVIF_CLASS_SW_GF100 /* if0005.h */ -0x00000007
/* the below match nvidia-assigned (either in hw, or sw) class numbers */
+#define NV_NULL_CLASS 0x00000030
+
#define NV_DEVICE /* cl0080.h */ 0x00000080
#define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002
#define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003
#define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d
+#define NV50_TWOD 0x0000502d
#define FERMI_TWOD_A 0x0000902d
+#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039
#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
@@ -99,6 +107,12 @@
#define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e
#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
+#define NV50_TESLA 0x00005097
+#define G82_TESLA 0x00008297
+#define GT200_TESLA 0x00008397
+#define GT214_TESLA 0x00008597
+#define GT21A_TESLA 0x00008697
+
#define FERMI_A /* cl9097.h */ 0x00009097
#define FERMI_B /* cl9097.h */ 0x00009197
#define FERMI_C /* cl9097.h */ 0x00009297
@@ -140,6 +154,8 @@
#define FERMI_DECOMPRESS 0x000090b8
+#define NV50_COMPUTE 0x000050c0
+#define GT214_COMPUTE 0x000085c0
#define FERMI_COMPUTE_A 0x000090c0
#define FERMI_COMPUTE_B 0x000091c0
#define KEPLER_COMPUTE_A 0x0000a0c0
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 4a7f6f7b836d..b52a8eadce01 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -11,8 +11,7 @@ struct nvif_client {
bool super;
};
-int nvif_client_init(const char *drv, const char *name, u64 device,
- const char *cfg, const char *dbg,
+int nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
struct nvif_client *);
void nvif_client_fini(struct nvif_client *);
int nvif_client_ioctl(struct nvif_client *, void *, u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 8bd39e69229c..0c6f48d8140a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -1,5 +1,7 @@
#ifndef __NVIF_DRIVER_H__
#define __NVIF_DRIVER_H__
+#include <nvif/os.h>
+struct nvif_client;
struct nvif_driver {
const char *name;
@@ -14,9 +16,11 @@ struct nvif_driver {
bool keep;
};
+int nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
+ const char *name, u64 device, struct nvif_client *);
+
extern const struct nvif_driver nvif_driver_nvkm;
extern const struct nvif_driver nvif_driver_drm;
extern const struct nvif_driver nvif_driver_lib;
extern const struct nvif_driver nvif_driver_null;
-
#endif
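
client.h and driver.h split what was one call into two: nvif_driver_init() now takes the driver, config and debug strings and produces the first (master) client, while nvif_client_init() creates further clients relative to a parent. A plausible call sequence under that reading — the nouveau_drm.c hunks later in the series are the authoritative user, and cfg, dbg and the device cookie here are placeholders:

    struct nvif_client master, cli;
    int ret;

    ret = nvif_driver_init("drm", cfg, dbg, "master", device, &master);
    if (ret == 0)
    	ret = nvif_client_init(&master, "client", device, &cli);
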
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
index 85c44e8a1201..c2c0fc41e017 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -1,9 +1,16 @@
#ifndef __NVIF_IF0000_H__
#define __NVIF_IF0000_H__
-#define NV_CLIENT_DEVLIST 0x00
+struct nvif_client_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 device;
+ char name[32];
+};
+
+#define NVIF_CLIENT_V0_DEVLIST 0x00
-struct nv_client_devlist_v0 {
+struct nvif_client_devlist_v0 {
__u8 version;
__u8 count;
__u8 pad02[6];
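
if0000.h grows an explicit argument struct for client creation, matching the new NVIF_CLASS_CLIENT in class.h above. A sketch of filling it in, assuming (as elsewhere in nvif) that the name is a fixed NUL-padded 32-byte field and that device selects which device the client may see:

    struct nvif_client_v0 args = {
    	.device = device,      /* placeholder device identifier */
    	.name   = "client",    /* NUL-padded into the 32-byte field */
    };
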
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index eaf5905a87a3..e876634da10a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,5 +1,6 @@
#ifndef __NVKM_CLIENT_H__
#define __NVKM_CLIENT_H__
+#define nvkm_client(p) container_of((p), struct nvkm_client, object)
#include <core/object.h>
struct nvkm_client {
@@ -8,9 +9,8 @@ struct nvkm_client {
u64 device;
u32 debug;
- struct nvkm_client_notify *notify[16];
+ struct nvkm_client_notify *notify[32];
struct rb_root objroot;
- struct rb_root dmaroot;
bool super;
void *data;
@@ -19,15 +19,11 @@ struct nvkm_client {
struct nvkm_vm *vm;
};
-bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *);
-void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *);
-struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object);
-
int nvkm_client_new(const char *name, u64 device, const char *cfg,
- const char *dbg, struct nvkm_client **);
-void nvkm_client_del(struct nvkm_client **);
-int nvkm_client_init(struct nvkm_client *);
-int nvkm_client_fini(struct nvkm_client *, bool suspend);
+ const char *dbg,
+ int (*)(const void *, u32, const void *, u32),
+ struct nvkm_client **);
+struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle);
int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
void *data, u32 size);
@@ -37,8 +33,8 @@ int nvkm_client_notify_put(struct nvkm_client *, int index);
/* logging for client-facing objects */
#define nvif_printk(o,l,p,f,a...) do { \
- struct nvkm_object *_object = (o); \
- struct nvkm_client *_client = _object->client; \
+ const struct nvkm_object *_object = (o); \
+ const struct nvkm_client *_client = _object->client; \
if (_client->debug >= NV_DBG_##l) \
printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name, \
_object->handle, _object->oclass, ##a); \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 6bc712f32c8b..d426b86e2712 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -262,7 +262,7 @@ extern const struct nvkm_sclass nvkm_udevice_sclass;
/* device logging */
#define nvdev_printk_(d,l,p,f,a...) do { \
- struct nvkm_device *_device = (d); \
+ const struct nvkm_device *_device = (d); \
if (_device->debug >= (l)) \
dev_##p(_device->dev, f, ##a); \
} while(0)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index 9ebfd8782366..d4cd2fbfde88 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -20,6 +20,7 @@ struct nvkm_engine_func {
int (*fini)(struct nvkm_engine *, bool suspend);
void (*intr)(struct nvkm_engine *);
void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
+ bool (*chsw_load)(struct nvkm_engine *);
struct {
int (*sclass)(struct nvkm_oclass *, int index,
@@ -44,4 +45,5 @@ int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
void nvkm_engine_unref(struct nvkm_engine **);
void nvkm_engine_tile(struct nvkm_engine *, int region);
+bool nvkm_engine_chsw_load(struct nvkm_engine *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 9363b839a9da..33ca6769266a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -6,9 +6,10 @@ struct nvkm_vma;
struct nvkm_vm;
enum nvkm_memory_target {
- NVKM_MEM_TARGET_INST,
- NVKM_MEM_TARGET_VRAM,
- NVKM_MEM_TARGET_HOST,
+ NVKM_MEM_TARGET_INST, /* instance memory */
+ NVKM_MEM_TARGET_VRAM, /* video memory */
+ NVKM_MEM_TARGET_HOST, /* coherent system memory */
+ NVKM_MEM_TARGET_NCOH, /* non-coherent system memory */
};
struct nvkm_memory {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index d92fd41e4056..7bd4897a8a2a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -5,7 +5,7 @@
struct nvkm_mm_node {
struct list_head nl_entry;
struct list_head fl_entry;
- struct list_head rl_entry;
+ struct nvkm_mm_node *next;
#define NVKM_MM_HEAP_ANY 0x00
u8 heap;
@@ -38,4 +38,10 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
u32 size_min, u32 align, struct nvkm_mm_node **);
void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
void nvkm_mm_dump(struct nvkm_mm *, const char *);
+
+static inline bool
+nvkm_mm_contiguous(struct nvkm_mm_node *node)
+{
+ return !node->next;
+}
#endif
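
nvkm_mm_node drops the rl_entry list in favour of an intrusive singly-linked chain, so "contiguous" reduces to "no next node", as the new inline spells out. Walking a multi-part allocation becomes a plain pointer chase; a small sketch, assuming nvkm_mm_node's existing length field holds the node size in mm units:

    static u32 mm_chain_length(struct nvkm_mm_node *node)
    {
    	u32 total = 0;
    	for (; node; node = node->next)
    		total += node->length;
    	return total;
    }
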
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index dcd048b91fac..96dda350ada3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -62,6 +62,11 @@ int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data);
int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
+bool nvkm_object_insert(struct nvkm_object *);
+void nvkm_object_remove(struct nvkm_object *);
+struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object,
+ const struct nvkm_object_func *);
+
struct nvkm_sclass {
int minver;
int maxver;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 57adefa8b08e..ca9ed3d68f44 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -32,7 +32,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
/* subdev logging */
#define nvkm_printk_(s,l,p,f,a...) do { \
- struct nvkm_subdev *_subdev = (s); \
+ const struct nvkm_subdev *_subdev = (s); \
if (_subdev->debug >= (l)) { \
dev_##p(_subdev->device->dev, "%s: "f, \
nvkm_subdev_name[_subdev->index], ##a); \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 114bfb737a81..d2a6532ce3b9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -12,9 +12,6 @@ struct nvkm_dmaobj {
u32 access;
u64 start;
u64 limit;
-
- struct rb_node rb;
- u64 handle; /*XXX HANDLE MERGE */
};
struct nvkm_dma {
@@ -22,8 +19,7 @@ struct nvkm_dma {
struct nvkm_engine engine;
};
-struct nvkm_dmaobj *
-nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
+struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object);
int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
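
With the rb/handle fields gone from nvkm_dmaobj in the previous hunk, DMA objects are no longer tracked in a per-engine tree; nvkm_dmaobj_search() presumably resolves the handle through the client's generic object index (see the nvkm_object_search() prototype added earlier) and returns an ERR_PTR on failure, which is how a hedged caller would look:

    struct nvkm_dmaobj *dmaobj = nvkm_dmaobj_search(client, handle);
    if (IS_ERR(dmaobj))
    	return PTR_ERR(dmaobj);
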
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index e6baf039c269..7e498e65b1e8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -4,13 +4,26 @@
#include <core/engine.h>
struct nvkm_fifo_chan;
+enum nvkm_falcon_dmaidx {
+ FALCON_DMAIDX_UCODE = 0,
+ FALCON_DMAIDX_VIRT = 1,
+ FALCON_DMAIDX_PHYS_VID = 2,
+ FALCON_DMAIDX_PHYS_SYS_COH = 3,
+ FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
+};
+
struct nvkm_falcon {
const struct nvkm_falcon_func *func;
- struct nvkm_engine engine;
-
+ const struct nvkm_subdev *owner;
+ const char *name;
u32 addr;
- u8 version;
- u8 secret;
+
+ struct mutex mutex;
+ const struct nvkm_subdev *user;
+
+ u8 version;
+ u8 secret;
+ bool debug;
struct nvkm_memory *core;
bool external;
@@ -19,15 +32,25 @@ struct nvkm_falcon {
u32 limit;
u32 *data;
u32 size;
+ u8 ports;
} code;
struct {
u32 limit;
u32 *data;
u32 size;
+ u8 ports;
} data;
+
+ struct nvkm_engine engine;
};
+int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
+ struct nvkm_falcon **);
+void nvkm_falcon_del(struct nvkm_falcon **);
+int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
+void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
+
int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
int index, bool enable, u32 addr, struct nvkm_engine **);
@@ -42,6 +65,51 @@ struct nvkm_falcon_func {
} data;
void (*init)(struct nvkm_falcon *);
void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+ void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
+ void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
+ void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
+ void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
+ int (*wait_for_halt)(struct nvkm_falcon *, u32);
+ int (*clear_interrupt)(struct nvkm_falcon *, u32);
+ void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
+ void (*start)(struct nvkm_falcon *);
+ int (*enable)(struct nvkm_falcon *falcon);
+ void (*disable)(struct nvkm_falcon *falcon);
+
struct nvkm_sclass sclass[];
};
+
+static inline u32
+nvkm_falcon_rd32(struct nvkm_falcon *falcon, u32 addr)
+{
+ return nvkm_rd32(falcon->owner->device, falcon->addr + addr);
+}
+
+static inline void
+nvkm_falcon_wr32(struct nvkm_falcon *falcon, u32 addr, u32 data)
+{
+ nvkm_wr32(falcon->owner->device, falcon->addr + addr, data);
+}
+
+static inline u32
+nvkm_falcon_mask(struct nvkm_falcon *falcon, u32 addr, u32 mask, u32 val)
+{
+ struct nvkm_device *device = falcon->owner->device;
+
+ return nvkm_mask(device, falcon->addr + addr, mask, val);
+}
+
+void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
+ bool);
+void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
+void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
+void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
+void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
+void nvkm_falcon_start(struct nvkm_falcon *);
+int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
+int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32);
+int nvkm_falcon_enable(struct nvkm_falcon *);
+void nvkm_falcon_disable(struct nvkm_falcon *);
+int nvkm_falcon_reset(struct nvkm_falcon *);
+
#endif
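
The falcon rework turns nvkm_falcon into a shareable resource: an owner subdev creates it, other subdevs serialise access through nvkm_falcon_get()/nvkm_falcon_put(), and the new func pointers gain exported wrappers. A hedged usage sketch, with the ucode offset, tag, port and timeout purely illustrative:

    static int falcon_run(struct nvkm_falcon *falcon,
    		      const struct nvkm_subdev *user,
    		      void *img, u32 size)
    {
    	int ret = nvkm_falcon_get(falcon, user);
    	if (ret)
    		return ret;

    	/* load ucode at IMEM offset 0, tag 0, port 0, non-secure */
    	nvkm_falcon_load_imem(falcon, img, 0, size, 0, 0, false);
    	nvkm_falcon_set_start_addr(falcon, 0);
    	nvkm_falcon_start(falcon);
    	ret = nvkm_falcon_wait_for_halt(falcon, 100 /* assumed ms */);

    	nvkm_falcon_put(falcon, user);
    	return ret;
    }
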
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index ed92fec5292c..24efa900d8ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -40,6 +40,7 @@ struct nvkm_fifo {
struct nvkm_event uevent; /* async user trigger */
struct nvkm_event cevent; /* channel creation event */
+ struct nvkm_event kevent; /* channel killed */
};
void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
new file mode 100644
index 000000000000..f5f4a14c4030
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
@@ -0,0 +1,26 @@
+#ifndef __NVBIOS_POWER_BUDGET_H__
+#define __NVBIOS_POWER_BUDGET_H__
+
+#include <nvkm/subdev/bios.h>
+
+struct nvbios_power_budget_entry {
+ u32 min_w;
+ u32 avg_w;
+ u32 max_w;
+};
+
+struct nvbios_power_budget {
+ u32 offset;
+ u8 ver;
+ u8 hlen;
+ u8 elen;
+ u8 ecount;
+ u8 cap_entry;
+};
+
+int nvbios_power_budget_header(struct nvkm_bios *,
+ struct nvbios_power_budget *);
+int nvbios_power_budget_entry(struct nvkm_bios *, struct nvbios_power_budget *,
+ u8 idx, struct nvbios_power_budget_entry *);
+
+#endif
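
This new header exposes the VBIOS power-budget table that the initial power-budget code parses. A hedged consumer sketch; the 0xff "no capped entry" sentinel and the mapping of avg_w/max_w onto the iccsense limits added below are assumptions, not taken from this header:

    struct nvbios_power_budget budget;
    struct nvbios_power_budget_entry entry;

    if (!nvbios_power_budget_header(bios, &budget) &&
        budget.cap_entry != 0xff &&
        !nvbios_power_budget_entry(bios, &budget, budget.cap_entry, &entry)) {
    	iccsense->power_w_max  = entry.avg_w; /* guess: sustained limit */
    	iccsense->power_w_crit = entry.max_w; /* guess: critical limit */
    }
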
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 794e432578b2..0b26a4c860ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -29,7 +29,7 @@ struct nvkm_mem {
u8 page_shift;
struct nvkm_mm_node *tag;
- struct list_head regions;
+ struct nvkm_mm_node *mem;
dma_addr_t *pages;
u32 memtype;
u64 offset;
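
Following the mm.h change, nvkm_mem's region list collapses to a single chain head; callers that used list_for_each_entry(..., &mem->regions, rl_entry) now walk the nodes directly, and the nouveau_bo.c hunk below swaps its list_is_singular() test for nvkm_mm_contiguous(). Sketch, with map_range() as a hypothetical stand-in for the real per-node work:

    struct nvkm_mm_node *node;

    for (node = mem->mem; node; node = node->next)
    	map_range(node->offset, node->length); /* map_range is hypothetical */
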
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index 3c2ddd975273..b7a9b041e130 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -8,6 +8,9 @@ struct nvkm_iccsense {
bool data_valid;
struct list_head sensors;
struct list_head rails;
+
+ u32 power_w_max;
+ u32 power_w_crit;
};
int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 27d25b18d85c..e68ba636741b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -9,6 +9,7 @@ struct nvkm_mc {
void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
+bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_intr(struct nvkm_device *, bool *handled);
void nvkm_mc_intr_unarm(struct nvkm_device *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index e6523e2cea9f..ac2a695963c1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -43,6 +43,7 @@ int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index f37538eb1fe5..179b6ed3f595 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -1,10 +1,12 @@
#ifndef __NVKM_PMU_H__
#define __NVKM_PMU_H__
#include <core/subdev.h>
+#include <engine/falcon.h>
struct nvkm_pmu {
const struct nvkm_pmu_func *func;
struct nvkm_subdev subdev;
+ struct nvkm_falcon *falcon;
struct {
u32 base;
@@ -35,6 +37,7 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index b04c38c07761..5dbd8aa4f8c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -26,7 +26,7 @@
#include <core/subdev.h>
enum nvkm_secboot_falcon {
- NVKM_SECBOOT_FALCON_PMU = 0,
+ NVKM_SECBOOT_FALCON_PMU = 0,
NVKM_SECBOOT_FALCON_RESERVED = 1,
NVKM_SECBOOT_FALCON_FECS = 2,
NVKM_SECBOOT_FALCON_GPCCS = 3,
@@ -35,22 +35,23 @@ enum nvkm_secboot_falcon {
};
/**
- * @base: base IO address of the falcon performing secure boot
- * @irq_mask: IRQ mask of the falcon performing secure boot
- * @enable_mask: enable mask of the falcon performing secure boot
+ * @wpr_set: whether the WPR region is currently set
*/
struct nvkm_secboot {
const struct nvkm_secboot_func *func;
+ struct nvkm_acr *acr;
struct nvkm_subdev subdev;
+ struct nvkm_falcon *boot_falcon;
- enum nvkm_devidx devidx;
- u32 base;
+ u64 wpr_addr;
+ u32 wpr_size;
+
+ bool wpr_set;
};
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
-int nvkm_secboot_reset(struct nvkm_secboot *, u32 falcon);
-int nvkm_secboot_start(struct nvkm_secboot *, u32 falcon);
+int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
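
secboot stops exporting raw falcon base/IRQ details and instead carries the boot falcon plus the WPR (write-protected region) address, size and state, with nvkm_secboot_reset() now taking the falcon enum and the separate start() entry point gone. Under that reading, a managed-falcon reset from another subdev would look roughly like:

    if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) {
    	ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
    	if (ret)
    		return ret;
    }
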
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 82d3e28918fd..6a567fe347b3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -48,10 +48,8 @@ void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
} while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \
\
if (_taken >= _nsecs) { \
- if (_warn) { \
- dev_warn(_device->dev, "timeout at %s:%d/%s()!\n", \
- __FILE__, __LINE__, __func__); \
- } \
+ if (_warn) \
+ dev_WARN(_device->dev, "timeout\n"); \
_taken = -ETIMEDOUT; \
} \
_taken; \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 71ebbfd4484f..d23209b62c25 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -11,6 +11,7 @@ struct nvkm_top {
u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
+int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx);
enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 7bd4683216d0..f98f800cc011 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -87,7 +87,7 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
- switch (drm->device.info.family) {
+ switch (drm->client.device.info.family) {
case NV_DEVICE_INFO_V0_TNT:
return NVIF_CLASS_SW_NV04;
case NV_DEVICE_INFO_V0_CELSIUS:
@@ -175,7 +175,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
@@ -199,7 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
if (!nvxx_device(device)->func->pci)
getparam->value = 3;
else
- if (drm_pci_device_is_agp(dev))
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
getparam->value = 0;
else
if (!pci_is_pcie(dev->pdev))
@@ -321,7 +321,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
}
/* Named memory object area */
- ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
+ ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
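
The getparam hunk replaces the DRM core helper drm_pci_device_is_agp() with a direct PCI capability probe; pci_find_capability() returns the capability's offset in config space, or 0 when absent, so the test is equivalent:

    /* open-coded AGP check, as now used above */
    static bool nouveau_is_agp(struct drm_device *dev)
    {
    	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP) != 0;
    }
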
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 8b1ca4add2ed..380f340204e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -65,7 +65,7 @@ static int
nv40_get_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
NV40_PMC_BACKLIGHT_MASK) >> 16;
@@ -76,7 +76,7 @@ static int
nv40_set_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
int val = bd->props.brightness;
int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
@@ -96,7 +96,7 @@ static int
nv40_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
struct backlight_properties props;
struct backlight_device *bd;
struct backlight_connector bl_connector;
@@ -133,7 +133,7 @@ nv50_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
int or = nv_encoder->or;
u32 div = 1025;
u32 val;
@@ -148,7 +148,7 @@ nv50_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
int or = nv_encoder->or;
u32 div = 1025;
u32 val = (bd->props.brightness * div) / 100;
@@ -169,7 +169,7 @@ nva3_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
int or = nv_encoder->or;
u32 div, val;
@@ -187,7 +187,7 @@ nva3_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
int or = nv_encoder->or;
u32 div, val;
@@ -213,7 +213,7 @@ static int
nv50_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
struct nouveau_encoder *nv_encoder;
struct backlight_properties props;
struct backlight_device *bd;
@@ -231,9 +231,9 @@ nv50_backlight_init(struct drm_connector *connector)
if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
return 0;
- if (drm->device.info.chipset <= 0xa0 ||
- drm->device.info.chipset == 0xaa ||
- drm->device.info.chipset == 0xac)
+ if (drm->client.device.info.chipset <= 0xa0 ||
+ drm->client.device.info.chipset == 0xaa ||
+ drm->client.device.info.chipset == 0xac)
ops = &nv50_bl_ops;
else
ops = &nva3_bl_ops;
@@ -265,7 +265,7 @@ int
nouveau_backlight_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
struct drm_connector *connector;
if (apple_gmux_present()) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 23ffe8571a99..9a0772ad495a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -215,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
struct nvbios *bios = &drm->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
@@ -319,7 +319,7 @@ static int
get_fp_strap(struct drm_device *dev, struct nvbios *bios)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
/*
* The fp strap is normally dictated by the "User Strap" in
@@ -333,10 +333,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
return nvif_rd32(device, 0x001800) & 0x0000000f;
else
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
@@ -638,7 +638,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
struct nvbios *bios = &drm->vbios;
int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
@@ -1255,7 +1255,7 @@ olddcb_table(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
u8 *dcb = NULL;
- if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT)
+ if (drm->client.device.info.family > NV_DEVICE_INFO_V0_TNT)
dcb = ROMPTR(dev, drm->vbios.data[0x36]);
if (!dcb) {
NV_WARN(drm, "No DCB data found in VBIOS\n");
@@ -1918,7 +1918,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
uint8_t bytes_to_write;
uint16_t hwsq_entry_offset;
int i;
@@ -2012,7 +2012,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->device);
+ struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvbios *legacy = &drm->vbios;
memset(legacy, 0, sizeof(struct nvbios));
@@ -2064,7 +2064,7 @@ nouveau_bios_posted(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
unsigned htotal;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return true;
htotal = NVReadVgaCrtc(dev, 0, 0x06);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index dd07ca140d12..548f36d33924 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nvkm_fb *fb = device->fb;
struct nvkm_fb_tile *tile = &fb->tile.region[i];
@@ -100,7 +100,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_fb *fb = nvxx_fb(&drm->device);
+ struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -139,60 +139,62 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
kfree(nvbo);
}
+static inline u64
+roundup_64(u64 x, u32 y)
+{
+ x += y - 1;
+ do_div(x, y);
+ return x * y;
+}
+
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
- int *align, int *size)
+ int *align, u64 *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->tile_mode) {
if (device->info.chipset >= 0x40) {
*align = 65536;
- *size = roundup(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->tile_mode);
} else if (device->info.chipset >= 0x30) {
*align = 32768;
- *size = roundup(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->tile_mode);
} else if (device->info.chipset >= 0x20) {
*align = 16384;
- *size = roundup(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->tile_mode);
} else if (device->info.chipset >= 0x10) {
*align = 16384;
- *size = roundup(*size, 32 * nvbo->tile_mode);
+ *size = roundup_64(*size, 32 * nvbo->tile_mode);
}
}
} else {
- *size = roundup(*size, (1 << nvbo->page_shift));
+ *size = roundup_64(*size, (1 << nvbo->page_shift));
*align = max((1 << nvbo->page_shift), *align);
}
- *size = roundup(*size, PAGE_SIZE);
+ *size = roundup_64(*size, PAGE_SIZE);
}
int
-nouveau_bo_new(struct drm_device *dev, int size, int align,
+nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
struct sg_table *sg, struct reservation_object *robj,
struct nouveau_bo **pnvbo)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = nouveau_drm(cli->dev);
struct nouveau_bo *nvbo;
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
- int lpg_shift = 12;
- int max_size;
-
- if (drm->client.vm)
- lpg_shift = drm->client.vm->mmu->lpg_shift;
- max_size = INT_MAX & ~((1 << lpg_shift) - 1);
- if (size <= 0 || size > max_size) {
- NV_WARN(drm, "skipped size %x\n", (u32)size);
+ if (!size) {
+ NV_WARN(drm, "skipped size %016llx\n", size);
return -EINVAL;
}
@@ -208,8 +210,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
+ nvbo->cli = cli;
- if (!nvxx_device(&drm->device)->func->cpu_coherent)
+ if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
nvbo->page_shift = 12;
@@ -255,10 +258,10 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
+ u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
unsigned i, fpfn, lpfn;
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@@ -316,12 +319,12 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
if (ret)
return ret;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
memtype == TTM_PL_FLAG_VRAM && contig) {
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
if (bo->mem.mem_type == TTM_PL_VRAM) {
struct nvkm_mem *mem = bo->mem.mm_node;
- if (!list_is_singular(&mem->regions))
+ if (!nvkm_mm_contiguous(mem->mem))
evict = true;
}
nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
@@ -443,7 +446,7 @@ void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
@@ -463,7 +466,7 @@ void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
@@ -579,9 +582,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
/* Some BARs do not support being ioremapped WC */
- if (nvxx_bar(&drm->device)->iomap_uncached) {
+ if (nvxx_bar(&drm->client.device)->iomap_uncached) {
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
}
@@ -594,7 +597,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
}
break;
case TTM_PL_TT:
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
man->func = &nouveau_gart_manager;
else
if (!drm->agp.bridge)
@@ -654,20 +657,20 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *node = old_mem->mm_node;
+ struct nvkm_mem *mem = old_reg->mm_node;
int ret = RING_SPACE(chan, 10);
if (ret == 0) {
BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
- OUT_RING (chan, upper_32_bits(node->vma[0].offset));
- OUT_RING (chan, lower_32_bits(node->vma[0].offset));
- OUT_RING (chan, upper_32_bits(node->vma[1].offset));
- OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, PAGE_SIZE);
- OUT_RING (chan, new_mem->num_pages);
+ OUT_RING (chan, new_reg->num_pages);
BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
}
return ret;
@@ -686,15 +689,15 @@ nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *node = old_mem->mm_node;
- u64 src_offset = node->vma[0].offset;
- u64 dst_offset = node->vma[1].offset;
- u32 page_count = new_mem->num_pages;
+ struct nvkm_mem *mem = old_reg->mm_node;
+ u64 src_offset = mem->vma[0].offset;
+ u64 dst_offset = mem->vma[1].offset;
+ u32 page_count = new_reg->num_pages;
int ret;
- page_count = new_mem->num_pages;
+ page_count = new_reg->num_pages;
while (page_count) {
int line_count = (page_count > 8191) ? 8191 : page_count;
@@ -724,15 +727,15 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *node = old_mem->mm_node;
- u64 src_offset = node->vma[0].offset;
- u64 dst_offset = node->vma[1].offset;
- u32 page_count = new_mem->num_pages;
+ struct nvkm_mem *mem = old_reg->mm_node;
+ u64 src_offset = mem->vma[0].offset;
+ u64 dst_offset = mem->vma[1].offset;
+ u32 page_count = new_reg->num_pages;
int ret;
- page_count = new_mem->num_pages;
+ page_count = new_reg->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -763,15 +766,15 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *node = old_mem->mm_node;
- u64 src_offset = node->vma[0].offset;
- u64 dst_offset = node->vma[1].offset;
- u32 page_count = new_mem->num_pages;
+ struct nvkm_mem *mem = old_reg->mm_node;
+ u64 src_offset = mem->vma[0].offset;
+ u64 dst_offset = mem->vma[1].offset;
+ u32 page_count = new_reg->num_pages;
int ret;
- page_count = new_mem->num_pages;
+ page_count = new_reg->num_pages;
while (page_count) {
int line_count = (page_count > 8191) ? 8191 : page_count;
@@ -801,35 +804,35 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *node = old_mem->mm_node;
+ struct nvkm_mem *mem = old_reg->mm_node;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
- OUT_RING (chan, upper_32_bits(node->vma[0].offset));
- OUT_RING (chan, lower_32_bits(node->vma[0].offset));
- OUT_RING (chan, upper_32_bits(node->vma[1].offset));
- OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
OUT_RING (chan, 0x00000000 /* COPY */);
- OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
+ OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
}
return ret;
}
static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *node = old_mem->mm_node;
+ struct nvkm_mem *mem = old_reg->mm_node;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
- OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
- OUT_RING (chan, upper_32_bits(node->vma[0].offset));
- OUT_RING (chan, lower_32_bits(node->vma[0].offset));
- OUT_RING (chan, upper_32_bits(node->vma[1].offset));
- OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
+ OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
}
return ret;
@@ -853,14 +856,14 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *node = old_mem->mm_node;
- u64 length = (new_mem->num_pages << PAGE_SHIFT);
- u64 src_offset = node->vma[0].offset;
- u64 dst_offset = node->vma[1].offset;
- int src_tiled = !!node->memtype;
- int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
+ struct nvkm_mem *mem = old_reg->mm_node;
+ u64 length = (new_reg->num_pages << PAGE_SHIFT);
+ u64 src_offset = mem->vma[0].offset;
+ u64 dst_offset = mem->vma[1].offset;
+ int src_tiled = !!mem->memtype;
+ int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
int ret;
while (length) {
@@ -940,20 +943,20 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
- struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+ struct nouveau_channel *chan, struct ttm_mem_reg *reg)
{
- if (mem->mem_type == TTM_PL_TT)
+ if (reg->mem_type == TTM_PL_TT)
return NvDmaTT;
return chan->vram.handle;
}
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- u32 src_offset = old_mem->start << PAGE_SHIFT;
- u32 dst_offset = new_mem->start << PAGE_SHIFT;
- u32 page_count = new_mem->num_pages;
+ u32 src_offset = old_reg->start << PAGE_SHIFT;
+ u32 dst_offset = new_reg->start << PAGE_SHIFT;
+ u32 page_count = new_reg->num_pages;
int ret;
ret = RING_SPACE(chan, 3);
@@ -961,10 +964,10 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return ret;
BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
- page_count = new_mem->num_pages;
+ page_count = new_reg->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -995,33 +998,33 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *reg)
{
- struct nvkm_mem *old_node = bo->mem.mm_node;
- struct nvkm_mem *new_node = mem->mm_node;
- u64 size = (u64)mem->num_pages << PAGE_SHIFT;
+ struct nvkm_mem *old_mem = bo->mem.mm_node;
+ struct nvkm_mem *new_mem = reg->mm_node;
+ u64 size = (u64)reg->num_pages << PAGE_SHIFT;
int ret;
- ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
- NV_MEM_ACCESS_RW, &old_node->vma[0]);
+ ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
+ NV_MEM_ACCESS_RW, &old_mem->vma[0]);
if (ret)
return ret;
- ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
- NV_MEM_ACCESS_RW, &old_node->vma[1]);
+ ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
+ NV_MEM_ACCESS_RW, &old_mem->vma[1]);
if (ret) {
- nvkm_vm_put(&old_node->vma[0]);
+ nvkm_vm_put(&old_mem->vma[0]);
return ret;
}
- nvkm_vm_map(&old_node->vma[0], old_node);
- nvkm_vm_map(&old_node->vma[1], new_node);
+ nvkm_vm_map(&old_mem->vma[0], old_mem);
+ nvkm_vm_map(&old_mem->vma[1], new_mem);
return 0;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
@@ -1033,8 +1036,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
* old nvkm_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_move_prep(drm, bo, new_mem);
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_bo_move_prep(drm, bo, new_reg);
if (ret)
return ret;
}
@@ -1042,14 +1045,14 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
if (ret == 0) {
- ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+ ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict,
- new_mem);
+ new_reg);
nouveau_fence_unref(&fence);
}
}
@@ -1124,7 +1127,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
struct ttm_place placement_memtype = {
.fpfn = 0,
@@ -1132,35 +1135,35 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
};
struct ttm_placement placement;
- struct ttm_mem_reg tmp_mem;
+ struct ttm_mem_reg tmp_reg;
int ret;
placement.num_placement = placement.num_busy_placement = 1;
placement.placement = placement.busy_placement = &placement_memtype;
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
+ tmp_reg = *new_reg;
+ tmp_reg.mm_node = NULL;
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_tt_bind(bo->ttm, &tmp_mem);
+ ret = ttm_tt_bind(bo->ttm, &tmp_reg);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
out:
- ttm_bo_mem_put(bo, &tmp_mem);
+ ttm_bo_mem_put(bo, &tmp_reg);
return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
struct ttm_place placement_memtype = {
.fpfn = 0,
@@ -1168,33 +1171,34 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
};
struct ttm_placement placement;
- struct ttm_mem_reg tmp_mem;
+ struct ttm_mem_reg tmp_reg;
int ret;
placement.num_placement = placement.num_busy_placement = 1;
placement.placement = placement.busy_placement = &placement_memtype;
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
+ tmp_reg = *new_reg;
+ tmp_reg.mm_node = NULL;
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
if (ret)
goto out;
out:
- ttm_bo_mem_put(bo, &tmp_mem);
+ ttm_bo_mem_put(bo, &tmp_reg);
return ret;
}
static void
-nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_mem_reg *new_reg)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_vma *vma;
@@ -1204,10 +1208,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
return;
list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
- (new_mem->mem_type == TTM_PL_VRAM ||
+ if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
+ (new_reg->mem_type == TTM_PL_VRAM ||
nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
- nvkm_vm_map(vma, new_mem->mm_node);
+ nvkm_vm_map(vma, new_reg->mm_node);
} else {
WARN_ON(ttm_bo_wait(bo, false, false));
nvkm_vm_unmap(vma);
@@ -1216,20 +1220,20 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
}
static int
-nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
struct nouveau_drm_tile **new_tile)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- u64 offset = new_mem->start << PAGE_SHIFT;
+ u64 offset = new_reg->start << PAGE_SHIFT;
*new_tile = NULL;
- if (new_mem->mem_type != TTM_PL_VRAM)
+ if (new_reg->mem_type != TTM_PL_VRAM)
return 0;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
- *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+ *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
nvbo->tile_mode,
nvbo->tile_flags);
}
@@ -1252,11 +1256,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg *old_reg = &bo->mem;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
@@ -1267,31 +1271,31 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
if (nvbo->pin_refcnt)
NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
if (ret)
return ret;
}
/* Fake bo copy. */
- if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
+ if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
BUG_ON(bo->mem.mm_node != NULL);
- bo->mem = *new_mem;
- new_mem->mm_node = NULL;
+ bo->mem = *new_reg;
+ new_reg->mm_node = NULL;
goto out;
}
/* Hardware assisted copy. */
if (drm->ttm.move) {
- if (new_mem->mem_type == TTM_PL_SYSTEM)
+ if (new_reg->mem_type == TTM_PL_SYSTEM)
ret = nouveau_bo_move_flipd(bo, evict, intr,
- no_wait_gpu, new_mem);
- else if (old_mem->mem_type == TTM_PL_SYSTEM)
+ no_wait_gpu, new_reg);
+ else if (old_reg->mem_type == TTM_PL_SYSTEM)
ret = nouveau_bo_move_flips(bo, evict, intr,
- no_wait_gpu, new_mem);
+ no_wait_gpu, new_reg);
else
ret = nouveau_bo_move_m2mf(bo, evict, intr,
- no_wait_gpu, new_mem);
+ no_wait_gpu, new_reg);
if (!ret)
goto out;
}
@@ -1299,10 +1303,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Fallback to software copy. */
ret = ttm_bo_wait(bo, intr, no_wait_gpu);
if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);
out:
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
@@ -1322,54 +1326,54 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
}
static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nvkm_device *device = nvxx_device(&drm->device);
- struct nvkm_mem *node = mem->mm_node;
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_mem *mem = reg->mm_node;
int ret;
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
+ reg->bus.addr = NULL;
+ reg->bus.offset = 0;
+ reg->bus.size = reg->num_pages << PAGE_SHIFT;
+ reg->bus.base = 0;
+ reg->bus.is_iomem = false;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
- switch (mem->mem_type) {
+ switch (reg->mem_type) {
case TTM_PL_SYSTEM:
/* System memory */
return 0;
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = drm->agp.base;
- mem->bus.is_iomem = !drm->agp.cma;
+ reg->bus.offset = reg->start << PAGE_SHIFT;
+ reg->bus.base = drm->agp.base;
+ reg->bus.is_iomem = !drm->agp.cma;
}
#endif
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */
case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = device->func->resource_addr(device, 1);
- mem->bus.is_iomem = true;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- struct nvkm_bar *bar = nvxx_bar(&drm->device);
+ reg->bus.offset = reg->start << PAGE_SHIFT;
+ reg->bus.base = device->func->resource_addr(device, 1);
+ reg->bus.is_iomem = true;
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
int page_shift = 12;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
- page_shift = node->page_shift;
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+ page_shift = mem->page_shift;
- ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
- &node->bar_vma);
+ ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
+ &mem->bar_vma);
if (ret)
return ret;
- nvkm_vm_map(&node->bar_vma, node);
- mem->bus.offset = node->bar_vma.offset;
+ nvkm_vm_map(&mem->bar_vma, mem);
+ reg->bus.offset = mem->bar_vma.offset;
}
break;
default:
@@ -1379,15 +1383,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
}
static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
- struct nvkm_mem *node = mem->mm_node;
+ struct nvkm_mem *mem = reg->mm_node;
- if (!node->bar_vma.node)
+ if (!mem->bar_vma.node)
return;
- nvkm_vm_unmap(&node->bar_vma);
- nvkm_vm_put(&node->bar_vma);
+ nvkm_vm_unmap(&mem->bar_vma);
+ nvkm_vm_put(&mem->bar_vma);
}
static int
@@ -1395,7 +1399,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
int i, ret;
@@ -1403,7 +1407,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nouveau_bo_tile_layout(nvbo))
return 0;
@@ -1418,7 +1422,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
/* make sure bo is in mappable vram */
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
bo->mem.start + bo->mem.num_pages < mappable)
return 0;
@@ -1460,7 +1464,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
drm = nouveau_bdev(ttm->bdev);
- device = nvxx_device(&drm->device);
+ device = nvxx_device(&drm->client.device);
dev = drm->dev;
pdev = device->dev;
@@ -1517,7 +1521,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
drm = nouveau_bdev(ttm->bdev);
- device = nvxx_device(&drm->device);
+ device = nvxx_device(&drm->client.device);
dev = drm->dev;
pdev = device->dev;
@@ -1570,8 +1574,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
struct nvkm_vma *
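The nouveau_bo.c hunks above are mostly mechanical: struct ttm_mem_reg parameters are renamed from *_mem to *_reg (freeing the name "mem" for the backing struct nvkm_mem), and device state moves from drm->device to drm->client.device. The hardware-assisted move dispatch that results can be condensed to the following sketch; this is a reduction of nouveau_bo_move() above, with the fake-copy and software-fallback tiers and error handling trimmed:

	/* Sketch only: hardware-assisted move dispatch, reduced from
	 * nouveau_bo_move() as changed above. */
	if (new_reg->mem_type == TTM_PL_SYSTEM)		/* VRAM/TT -> system */
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_gpu, new_reg);
	else if (old_reg->mem_type == TTM_PL_SYSTEM)	/* system -> VRAM/TT */
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_gpu, new_reg);
	else						/* VRAM <-> TT */
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_gpu, new_reg);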
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index e42360983229..b06a5385d6dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,6 +26,8 @@ struct nouveau_bo {
struct list_head vma_list;
unsigned page_shift;
+ struct nouveau_cli *cli;
+
u32 tile_mode;
u32 tile_flags;
struct nouveau_drm_tile *tile;
@@ -69,7 +71,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
extern struct ttm_bo_driver nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
-int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
+int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct reservation_object *robj,
struct nouveau_bo **);
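With the prototype change above, buffer allocation is tied to a client rather than the device: callers now pass their struct nouveau_cli, and the size widens to u64. A hypothetical caller under the new signature; the placement flags and size here are illustrative only, not taken from the patch:

	/* Hypothetical caller: 64 KiB VRAM buffer, no tiling, no sg/resv. */
	struct nouveau_bo *nvbo = NULL;
	int ret = nouveau_bo_new(cli, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
				 0, 0, NULL, NULL, &nvbo);
	if (ret)
		return ret;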
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index f9b3c811187e..dbc41fa86ee8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -45,10 +45,20 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+static int
+nouveau_channel_killed(struct nvif_notify *ntfy)
+{
+ struct nouveau_channel *chan = container_of(ntfy, typeof(*chan), kill);
+ struct nouveau_cli *cli = (void *)chan->user.client;
+ NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
+ atomic_set(&chan->killed, 1);
+ return NVIF_NOTIFY_DROP;
+}
+
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
- if (likely(chan && chan->fence)) {
+ if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_fence *fence = NULL;
int ret;
@@ -78,6 +88,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_object_fini(&chan->nvsw);
nvif_object_fini(&chan->gart);
nvif_object_fini(&chan->vram);
+ nvif_notify_fini(&chan->kill);
nvif_object_fini(&chan->user);
nvif_object_fini(&chan->push.ctxdma);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
@@ -107,13 +118,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->device = device;
chan->drm = drm;
+ atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */
target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
if (nouveau_vram_pushbuf)
target = TTM_PL_FLAG_VRAM;
- ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
+ ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
&chan->push.buffer);
if (ret == 0) {
ret = nouveau_bo_pin(chan->push.buffer, target, false);
@@ -301,12 +313,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
struct nvif_device *device = chan->device;
struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_drm *drm = chan->drm;
struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {};
int ret, i;
nvif_object_map(&chan->user);
+ if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
+ ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
+ true, NV906F_V0_NTFY_KILLED,
+ NULL, 0, 0, &chan->kill);
+ if (ret == 0)
+ ret = nvif_notify_get(&chan->kill);
+ if (ret) {
+ NV_ERROR(drm, "Failed to request channel kill "
+ "notification: %d\n", ret);
+ return ret;
+ }
+ }
+
/* allocate dma objects to cover all allowed vram, and gart */
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 48062c94f36d..46b947ba1cf4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,7 +1,7 @@
#ifndef __NOUVEAU_CHAN_H__
#define __NOUVEAU_CHAN_H__
-
#include <nvif/object.h>
+#include <nvif/notify.h>
struct nvif_device;
struct nouveau_channel {
@@ -38,6 +38,9 @@ struct nouveau_channel {
u32 user_put;
struct nvif_object user;
+
+ struct nvif_notify kill;
+ atomic_t killed;
};
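The new kill notifier and killed flag let the host's "channel killed" event propagate to the DRM layer: nouveau_channel_killed() latches the flag, and nouveau_channel_idle() above now refuses to wait on a dead channel. Other submission paths could guard the same way; a minimal sketch, where the choice of errno is an assumption rather than something the patch specifies:

	/* Sketch: bail out early on a killed channel. */
	if (unlikely(atomic_read(&chan->killed)))
		return -ENODEV;	/* illustrative errno, not from the patch */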
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 947c200655b4..f5add64c093f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
#include "nouveau_reg.h"
#include "nouveau_drv.h"
@@ -418,7 +419,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int i, panel = -ENODEV;
@@ -520,7 +521,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
return;
nv_connector->detected_encoder = nv_encoder;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
} else
@@ -530,8 +531,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = false;
} else {
connector->doublescan_allowed = true;
- if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
- (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
+ (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
(dev->pdev->device & 0x0ff0) != 0x0100 &&
(dev->pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
@@ -769,7 +770,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret;
- if (connector->dev->mode_config.funcs->atomic_commit)
+ if (drm_drv_uses_atomic_modeset(connector->dev))
return drm_atomic_helper_connector_set_property(connector, property, value);
ret = connector->funcs->atomic_set_property(&nv_connector->base,
@@ -983,17 +984,17 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
/* Note: these limits are conservative; some Fermis
* can do 297 MHz. Unclear how this can be determined.
*/
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_KEPLER)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KEPLER)
return 297000;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
return 225000;
}
if (dcb->location != DCB_LOC_ON_CHIP ||
- drm->device.info.chipset >= 0x46)
+ drm->client.device.info.chipset >= 0x46)
return 165000;
- else if (drm->device.info.chipset >= 0x40)
+ else if (drm->client.device.info.chipset >= 0x40)
return 155000;
- else if (drm->device.info.chipset >= 0x18)
+ else if (drm->client.device.info.chipset >= 0x18)
return 135000;
else
return 112000;
@@ -1040,7 +1041,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
clock = clock * (connector->display_info.bpc * 3) / 10;
break;
default:
- BUG_ON(1);
+ BUG();
return MODE_BAD;
}
@@ -1074,7 +1075,7 @@ nouveau_connector_helper_funcs = {
static int
nouveau_connector_dpms(struct drm_connector *connector, int mode)
{
- if (connector->dev->mode_config.funcs->atomic_commit)
+ if (drm_drv_uses_atomic_modeset(connector->dev))
return drm_atomic_helper_connector_dpms(connector, mode);
return drm_helper_connector_dpms(connector, mode);
}
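Checks that previously peeked at mode_config.funcs->atomic_commit switch to the drm_drv_uses_atomic_modeset() helper, so "is this device atomic?" is asked one way everywhere. Condensed from nouveau_connector_dpms() as changed above:

	if (drm_drv_uses_atomic_modeset(connector->dev))
		return drm_atomic_helper_connector_dpms(connector, mode);
	return drm_helper_connector_dpms(connector, mode);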
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 096983c42a1f..a4d1a059bd3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,6 +30,7 @@
#include <nvif/notify.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 411c12cdb249..fd64dfdc7d4f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -259,8 +259,9 @@ nouveau_debugfs_init(struct nouveau_drm *drm)
if (!drm->debugfs)
return -ENOMEM;
- ret = nvif_object_init(&drm->device.object, 0, NVIF_CLASS_CONTROL,
- NULL, 0, &drm->debugfs->ctrl);
+ ret = nvif_object_init(&drm->client.device.object, 0,
+ NVIF_CLASS_CONTROL, NULL, 0,
+ &drm->debugfs->ctrl);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6a157763dfc3..72fdba1a1c5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -58,27 +58,30 @@ int
nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
struct drm_crtc *crtc;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- if (nv_crtc->index == pipe) {
- nvif_notify_get(&nv_crtc->vblank);
- return 0;
- }
- }
- return -EINVAL;
+ struct nouveau_crtc *nv_crtc;
+
+ crtc = drm_crtc_from_index(dev, pipe);
+ if (!crtc)
+ return -EINVAL;
+
+ nv_crtc = nouveau_crtc(crtc);
+ nvif_notify_get(&nv_crtc->vblank);
+
+ return 0;
}
void
nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
{
struct drm_crtc *crtc;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- if (nv_crtc->index == pipe) {
- nvif_notify_put(&nv_crtc->vblank);
- return;
- }
- }
+ struct nouveau_crtc *nv_crtc;
+
+ crtc = drm_crtc_from_index(dev, pipe);
+ if (!crtc)
+ return;
+
+ nv_crtc = nouveau_crtc(crtc);
+ nvif_notify_put(&nv_crtc->vblank);
}
static inline int
@@ -162,7 +165,7 @@ nouveau_display_vblstamp(struct drm_device *dev, unsigned int pipe,
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (nouveau_crtc(crtc)->index == pipe) {
struct drm_display_mode *mode;
- if (dev->mode_config.funcs->atomic_commit)
+ if (drm_drv_uses_atomic_modeset(dev))
mode = &crtc->state->adjusted_mode;
else
mode = &crtc->hwmode;
@@ -259,7 +262,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
fb->nvbo = nvbo;
ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
@@ -493,7 +496,7 @@ int
nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_display *disp;
int ret;
@@ -510,15 +513,15 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
} else
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else
- if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) {
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) {
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
} else {
@@ -529,7 +532,7 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- if (drm->device.info.chipset < 0x11)
+ if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
else
dev->mode_config.async_page_flip = true;
@@ -556,7 +559,7 @@ nouveau_display_create(struct drm_device *dev)
int i;
for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
- ret = nvif_object_init(&drm->device.object, 0,
+ ret = nvif_object_init(&drm->client.device.object, 0,
oclass[i], NULL, 0, &disp->disp);
}
@@ -739,7 +742,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
struct nouveau_display *disp = nouveau_display(dev);
struct drm_crtc *crtc;
- if (dev->mode_config.funcs->atomic_commit) {
+ if (drm_drv_uses_atomic_modeset(dev)) {
if (!runtime) {
disp->suspend = nouveau_atomic_suspend(dev);
if (IS_ERR(disp->suspend)) {
@@ -785,7 +788,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
struct drm_crtc *crtc;
int ret;
- if (dev->mode_config.funcs->atomic_commit) {
+ if (drm_drv_uses_atomic_modeset(dev)) {
nouveau_display_init(dev);
if (disp->suspend) {
drm_atomic_helper_resume(dev, disp->suspend);
@@ -948,7 +951,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
- { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0],
+ { { }, event, crtc, fb->format->cpp[0] * 8, fb->pitches[0],
new_bo->bo.offset };
/* Keep vblanks on during flip, for the target crtc of this flip */
@@ -1055,6 +1058,7 @@ int
nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *bo;
uint32_t domain;
int ret;
@@ -1064,12 +1068,12 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
args->size = roundup(args->size, PAGE_SIZE);
/* Use VRAM if there is any; otherwise fall back to system memory */
- if (nouveau_drm(dev)->device.info.ram_size != 0)
+ if (nouveau_drm(dev)->client.device.info.ram_size != 0)
domain = NOUVEAU_GEM_DOMAIN_VRAM;
else
domain = NOUVEAU_GEM_DOMAIN_GART;
- ret = nouveau_gem_new(dev, args->size, 0, domain, 0, 0, &bo);
+ ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo);
if (ret)
return ret;
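The vblank enable/disable paths above drop their hand-rolled walk of mode_config.crtc_list in favour of the drm_crtc_from_index() core helper. The lookup reduces to:

	/* pipe -> CRTC via the core helper, as in the hunk above. */
	struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
	if (!crtc)
		return -EINVAL;
	nvif_notify_get(&nouveau_crtc(crtc)->vblank);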
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index bc85a45f91cd..468ed1d3bb26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,6 +37,8 @@
#include <core/pci.h>
#include <core/tegra.h>
+#include <nvif/driver.h>
+
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cla06f.h>
@@ -109,35 +111,53 @@ nouveau_name(struct drm_device *dev)
return nouveau_platform_name(dev->platformdev);
}
+static void
+nouveau_cli_fini(struct nouveau_cli *cli)
+{
+ nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
+ usif_client_fini(cli);
+ nvif_device_fini(&cli->device);
+ nvif_client_fini(&cli->base);
+}
+
static int
-nouveau_cli_create(struct drm_device *dev, const char *sname,
- int size, void **pcli)
+nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
+ struct nouveau_cli *cli)
{
- struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
+ u64 device = nouveau_name(drm->dev);
int ret;
- if (cli) {
- snprintf(cli->name, sizeof(cli->name), "%s", sname);
- cli->dev = dev;
- ret = nvif_client_init(NULL, cli->name, nouveau_name(dev),
- nouveau_config, nouveau_debug,
+ snprintf(cli->name, sizeof(cli->name), "%s", sname);
+ cli->dev = drm->dev;
+ mutex_init(&cli->mutex);
+ usif_client_init(cli);
+
+ if (cli == &drm->client) {
+ ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
+ cli->name, device, &cli->base);
+ } else {
+ ret = nvif_client_init(&drm->client.base, cli->name, device,
&cli->base);
- if (ret == 0) {
- mutex_init(&cli->mutex);
- usif_client_init(cli);
- }
- return ret;
}
- return -ENOMEM;
-}
+ if (ret) {
+ NV_ERROR(drm, "Client allocation failed: %d\n", ret);
+ goto done;
+ }
-static void
-nouveau_cli_destroy(struct nouveau_cli *cli)
-{
- nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
- nvif_client_fini(&cli->base);
- usif_client_fini(cli);
- kfree(cli);
+ ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
+ &(struct nv_device_v0) {
+ .device = ~0,
+ }, sizeof(struct nv_device_v0),
+ &cli->device);
+ if (ret) {
+ NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+ goto done;
+ }
+
+done:
+ if (ret)
+ nouveau_cli_fini(cli);
+ return ret;
}
static void
@@ -161,7 +181,7 @@ nouveau_accel_fini(struct nouveau_drm *drm)
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
struct nvif_sclass *sclass;
u32 arg0, arg1;
int ret, i, n;
@@ -215,7 +235,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
}
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
- ret = nouveau_channel_new(drm, &drm->device,
+ ret = nouveau_channel_new(drm, &drm->client.device,
NVA06F_V0_ENGINE_CE0 |
NVA06F_V0_ENGINE_CE1,
0, &drm->cechan);
@@ -228,7 +248,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
if (device->info.chipset >= 0xa3 &&
device->info.chipset != 0xaa &&
device->info.chipset != 0xac) {
- ret = nouveau_channel_new(drm, &drm->device,
+ ret = nouveau_channel_new(drm, &drm->client.device,
NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -240,7 +260,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
arg1 = NvDmaTT;
}
- ret = nouveau_channel_new(drm, &drm->device, arg0, arg1, &drm->channel);
+ ret = nouveau_channel_new(drm, &drm->client.device,
+ arg0, arg1, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_fini(drm);
@@ -280,8 +301,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
}
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
- ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
- NULL, &drm->notify);
+ ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0,
+ false, NULL, &drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_fini(drm);
@@ -407,12 +428,17 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
struct nouveau_drm *drm;
int ret;
- ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm);
+ if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
+ return -ENOMEM;
+ dev->dev_private = drm;
+ drm->dev = dev;
+
+ ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
return ret;
- dev->dev_private = drm;
- drm->dev = dev;
+ dev->irq_enabled = true;
+
nvxx_client(&drm->client.base)->debug =
nvkm_dbgopt(nouveau_debug, "DRM");
@@ -421,33 +447,24 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_get_hdmi_dev(drm);
- ret = nvif_device_init(&drm->client.base.object, 0, NV_DEVICE,
- &(struct nv_device_v0) {
- .device = ~0,
- }, sizeof(struct nv_device_v0),
- &drm->device);
- if (ret)
- goto fail_device;
-
- dev->irq_enabled = true;
-
/* workaround an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
*/
- if (drm->device.info.chipset == 0xc1)
- nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000);
+ if (drm->client.device.info.chipset == 0xc1)
+ nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);
nouveau_vga_init(drm);
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- if (!nvxx_device(&drm->device)->mmu) {
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ if (!nvxx_device(&drm->client.device)->mmu) {
ret = -ENOSYS;
goto fail_device;
}
- ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
- 0x1000, NULL, &drm->client.vm);
+ ret = nvkm_vm_new(nvxx_device(&drm->client.device),
+ 0, (1ULL << 40), 0x1000, NULL,
+ &drm->client.vm);
if (ret)
goto fail_device;
@@ -497,12 +514,12 @@ fail_bios:
fail_ttm:
nouveau_vga_fini(drm);
fail_device:
- nvif_device_fini(&drm->device);
- nouveau_cli_destroy(&drm->client);
+ nouveau_cli_fini(&drm->client);
+ kfree(drm);
return ret;
}
-static int
+static void
nouveau_drm_unload(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -527,11 +544,10 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_ttm_fini(drm);
nouveau_vga_fini(drm);
- nvif_device_fini(&drm->device);
if (drm->hdmi_device)
pci_dev_put(drm->hdmi_device);
- nouveau_cli_destroy(&drm->client);
- return 0;
+ nouveau_cli_fini(&drm->client);
+ kfree(drm);
}
void
@@ -561,7 +577,6 @@ static int
nouveau_do_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_cli *cli;
int ret;
nouveau_led_suspend(dev);
@@ -591,7 +606,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
goto fail_display;
}
- NV_INFO(drm, "suspending client object trees...\n");
+ NV_INFO(drm, "suspending fence...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
if (!nouveau_fence(drm)->suspend(drm)) {
ret = -ENOMEM;
@@ -599,13 +614,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
}
- list_for_each_entry(cli, &drm->clients, head) {
- ret = nvif_client_suspend(&cli->base);
- if (ret)
- goto fail_client;
- }
-
- NV_INFO(drm, "suspending kernel object tree...\n");
+ NV_INFO(drm, "suspending object tree...\n");
ret = nvif_client_suspend(&drm->client.base);
if (ret)
goto fail_client;
@@ -613,10 +622,6 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
return 0;
fail_client:
- list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
- nvif_client_resume(&cli->base);
- }
-
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
@@ -632,19 +637,14 @@ static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_cli *cli;
- NV_INFO(drm, "resuming kernel object tree...\n");
+ NV_INFO(drm, "resuming object tree...\n");
nvif_client_resume(&drm->client.base);
- NV_INFO(drm, "resuming client object trees...\n");
+ NV_INFO(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
- list_for_each_entry(cli, &drm->clients, head) {
- nvif_client_resume(&cli->base);
- }
-
nouveau_run_vbios_init(dev);
if (dev->mode_config.num_crtc) {
@@ -759,7 +759,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nvif_device *device = &nouveau_drm(drm_dev)->device;
+ struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
int ret;
if (nouveau_runtime_pm == 0)
@@ -845,20 +845,20 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
- ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli);
+ if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL)))
+ return ret;
+ ret = nouveau_cli_init(drm, name, cli);
if (ret)
- goto out_suspend;
+ goto done;
cli->base.super = false;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
- 0x1000, NULL, &cli->vm);
- if (ret) {
- nouveau_cli_destroy(cli);
- goto out_suspend;
- }
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
+ (1ULL << 40), 0x1000, NULL, &cli->vm);
+ if (ret)
+ goto done;
nvxx_client(&cli->base)->vm = cli->vm;
}
@@ -869,10 +869,14 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
list_add(&cli->head, &drm->clients);
mutex_unlock(&drm->client.mutex);
-out_suspend:
+done:
+ if (ret && cli) {
+ nouveau_cli_fini(cli);
+ kfree(cli);
+ }
+
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
-
return ret;
}
@@ -899,7 +903,8 @@ static void
nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_cli *cli = nouveau_cli(fpriv);
- nouveau_cli_destroy(cli);
+ nouveau_cli_fini(cli);
+ kfree(cli);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
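nouveau_cli_create()/nouveau_cli_destroy() become init/fini pairs over caller-owned storage: the device-wide client is embedded in struct nouveau_drm, while per-file clients are allocated in open and freed in postclose. The ownership pattern, condensed from the hunks above with the intermediate error unwinding trimmed:

	/* Sketch of the new client lifecycle (per-file case). */
	struct nouveau_cli *cli = kzalloc(sizeof(*cli), GFP_KERNEL);
	if (!cli)
		return -ENOMEM;
	ret = nouveau_cli_init(drm, name, cli);
	if (ret) {
		kfree(cli);
		return ret;
	}
	/* ... later, on close ... */
	nouveau_cli_fini(cli);
	kfree(cli);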
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 42c1fa53d431..eadec2f49ad3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -86,14 +86,17 @@ enum nouveau_drm_handle {
struct nouveau_cli {
struct nvif_client base;
+ struct drm_device *dev;
+ struct mutex mutex;
+
+ struct nvif_device device;
+
struct nvkm_vm *vm; /*XXX*/
struct list_head head;
- struct mutex mutex;
void *abi16;
struct list_head objects;
struct list_head notifys;
char name[32];
- struct drm_device *dev;
};
static inline struct nouveau_cli *
@@ -111,7 +114,6 @@ struct nouveau_drm {
struct nouveau_cli client;
struct drm_device *dev;
- struct nvif_device device;
struct list_head clients;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index fa2d0a978ccc..442e25c17383 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -41,6 +41,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_atomic.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
@@ -59,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -91,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -123,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -265,10 +266,10 @@ nouveau_fbcon_accel_init(struct drm_device *dev)
struct fb_info *info = fbcon->helper.fbdev;
int ret;
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
ret = nv04_fbcon_accel_init(info);
else
- if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
ret = nv50_fbcon_accel_init(info);
else
ret = nvc0_fbcon_accel_init(info);
@@ -323,7 +324,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
container_of(helper, struct nouveau_fbdev, helper);
struct drm_device *dev = fbcon->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
struct fb_info *info;
struct nouveau_framebuffer *fb;
struct nouveau_channel *chan;
@@ -340,8 +341,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height,
- 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
+ ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
+ mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
+ 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(drm, "failed to allocate framebuffer\n");
goto out;
@@ -400,7 +402,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->base.pitches[0], fb->base.depth);
+ drm_fb_helper_fill_fix(info, fb->base.pitches[0],
+ fb->base.format->depth);
drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -530,8 +533,7 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
- ret = drm_fb_helper_init(dev, &fbcon->helper,
- dev->mode_config.num_crtc, 4);
+ ret = drm_fb_helper_init(dev, &fbcon->helper, 4);
if (ret)
goto free;
@@ -539,16 +541,16 @@ nouveau_fbcon_init(struct drm_device *dev)
if (ret)
goto fini;
- if (drm->device.info.ram_size <= 32 * 1024 * 1024)
+ if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
else
- if (drm->device.info.ram_size <= 64 * 1024 * 1024)
+ if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
preferred_bpp = 16;
else
preferred_bpp = 32;
/* disable all the possible outputs/crtcs before entering KMS mode */
- if (!dev->mode_config.funcs->atomic_commit)
+ if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 88ee60d1b907..99e14e3e0fe4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -190,7 +190,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
return;
ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
- false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+ false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
&(struct nvif_notify_uevent_req) { },
sizeof(struct nvif_notify_uevent_req),
sizeof(struct nvif_notify_uevent_rep),
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 201b52b750dd..ca5397beb357 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -175,11 +175,11 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
}
int
-nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
+nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
struct nouveau_bo **pnvbo)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = nouveau_drm(cli->dev);
struct nouveau_bo *nvbo;
u32 flags = 0;
int ret;
@@ -194,7 +194,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
flags |= TTM_PL_FLAG_UNCACHED;
- ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
+ ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
tile_flags, NULL, NULL, pnvbo);
if (ret)
return ret;
@@ -206,12 +206,12 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+ ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
if (ret) {
nouveau_bo_ref(NULL, pnvbo);
return -ENOMEM;
@@ -257,7 +257,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct nvkm_fb *fb = nvxx_fb(&drm->device);
+ struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
@@ -267,7 +267,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = nouveau_gem_new(dev, req->info.size, req->align,
+ ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
if (ret)
@@ -496,7 +496,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
return ret;
}
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -767,7 +767,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
push[i].length);
}
} else
- if (drm->device.info.chipset >= 0x25) {
+ if (drm->client.device.info.chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_PRINTK(err, cli, "cal_space: %d\n", ret);
@@ -840,7 +840,7 @@ out_next:
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (drm->device.info.chipset >= 0x25) {
+ if (drm->client.device.info.chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 7e32da2e037a..8fa6ed9ddd3a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -16,7 +16,7 @@ nouveau_gem_object(struct drm_gem_object *gem)
}
/* nouveau_gem.c */
-extern int nouveau_gem_new(struct drm_device *, int size, int align,
+extern int nouveau_gem_new(struct nouveau_cli *, u64 size, int align,
uint32_t domain, uint32_t tile_mode,
uint32_t tile_flags, struct nouveau_bo **);
extern void nouveau_gem_object_del(struct drm_gem_object *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 71f764bf4cc6..23b1670c1c2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -43,7 +43,7 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int temp = nvkm_therm_temp_get(therm);
if (temp < 0)
@@ -69,7 +69,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -81,7 +81,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -102,7 +102,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -114,7 +114,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -134,7 +134,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK) * 1000);
@@ -145,7 +145,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -165,7 +165,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
@@ -176,7 +176,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -197,7 +197,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL) * 1000);
@@ -209,7 +209,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -230,7 +230,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
@@ -243,7 +243,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -263,7 +263,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN) * 1000);
@@ -275,7 +275,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -296,7 +296,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
@@ -309,7 +309,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -349,7 +349,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm));
}
@@ -362,7 +362,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE);
@@ -378,7 +378,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
int ret;
@@ -401,7 +401,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int ret;
ret = therm->fan_get(therm);
@@ -417,7 +417,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int ret = -ENODEV;
long value;
@@ -441,7 +441,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY);
@@ -457,7 +457,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
int ret;
@@ -481,7 +481,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY);
@@ -497,7 +497,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
long value;
int ret;
@@ -521,7 +521,7 @@ nouveau_hwmon_get_in0_input(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_volt *volt = nvxx_volt(&drm->device);
+ struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
int ret;
ret = nvkm_volt_get(volt);
@@ -540,7 +540,7 @@ nouveau_hwmon_get_in0_min(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_volt *volt = nvxx_volt(&drm->device);
+ struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
if (!volt || !volt->min_uv)
return -ENODEV;
@@ -557,7 +557,7 @@ nouveau_hwmon_get_in0_max(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_volt *volt = nvxx_volt(&drm->device);
+ struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
if (!volt || !volt->max_uv)
return -ENODEV;
@@ -584,7 +584,7 @@ nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
int result = nvkm_iccsense_read_all(iccsense);
if (result < 0)
@@ -596,6 +596,32 @@ nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a,
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO,
nouveau_hwmon_get_power1_input, NULL, 0);
+static ssize_t
+nouveau_hwmon_get_power1_max(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+ return sprintf(buf, "%i\n", iccsense->power_w_max);
+}
+
+static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO,
+ nouveau_hwmon_get_power1_max, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_power1_crit(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+ return sprintf(buf, "%i\n", iccsense->power_w_crit);
+}
+
+static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO,
+ nouveau_hwmon_get_power1_crit, NULL, 0);
+
static struct attribute *hwmon_default_attributes[] = {
&sensor_dev_attr_name.dev_attr.attr,
&sensor_dev_attr_update_rate.dev_attr.attr,
@@ -639,6 +665,12 @@ static struct attribute *hwmon_power_attributes[] = {
NULL
};
+static struct attribute *hwmon_power_caps_attributes[] = {
+ &sensor_dev_attr_power1_max.dev_attr.attr,
+ &sensor_dev_attr_power1_crit.dev_attr.attr,
+ NULL
+};
+
static const struct attribute_group hwmon_default_attrgroup = {
.attrs = hwmon_default_attributes,
};
@@ -657,6 +689,9 @@ static const struct attribute_group hwmon_in0_attrgroup = {
static const struct attribute_group hwmon_power_attrgroup = {
.attrs = hwmon_power_attributes,
};
+static const struct attribute_group hwmon_power_caps_attrgroup = {
+ .attrs = hwmon_power_caps_attributes,
+};
#endif
int
@@ -664,9 +699,9 @@ nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->device);
- struct nvkm_volt *volt = nvxx_volt(&drm->device);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
+ struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
int ret = 0;
@@ -728,8 +763,16 @@ nouveau_hwmon_init(struct drm_device *dev)
if (iccsense && iccsense->data_valid && !list_empty(&iccsense->rails)) {
ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_power_attrgroup);
+
if (ret)
goto error;
+
+ if (iccsense->power_w_max && iccsense->power_w_crit) {
+ ret = sysfs_create_group(&hwmon_dev->kobj,
+ &hwmon_power_caps_attrgroup);
+ if (ret)
+ goto error;
+ }
}
hwmon->hwmon = hwmon_dev;
@@ -759,6 +802,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_in0_attrgroup);
sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_attrgroup);
+ sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_caps_attrgroup);
hwmon_device_unregister(hwmon->hwmon);
}
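The new power1_max and power1_crit readings get their own sysfs attribute group so they are exposed only when iccsense actually reported both caps; nouveau_hwmon_init() above registers the group conditionally:

	/* As in the hunk above: only advertise caps that exist. */
	if (iccsense->power_w_max && iccsense->power_w_crit)
		ret = sysfs_create_group(&hwmon_dev->kobj,
					 &hwmon_power_caps_attrgroup);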
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
index 3e2f1b6cd4df..2c5e0628da12 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.c
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -38,7 +38,7 @@ nouveau_led_get_brightness(struct led_classdev *led)
{
struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
u32 div, duty;
div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
@@ -55,7 +55,7 @@ nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
{
struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
u32 freq = 100; /* this is what nvidia uses and it should be good-enough */
@@ -78,7 +78,7 @@ int
nouveau_led_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
struct dcb_gpio_func logo_led;
int ret;
@@ -102,6 +102,7 @@ nouveau_led_init(struct drm_device *dev)
ret = led_classdev_register(dev->dev, &drm->led->led);
if (ret) {
kfree(drm->led);
+ drm->led = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 15f0925ea13b..b3f29b1ce9ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -60,20 +60,15 @@ nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
static int
nvkm_client_resume(void *priv)
{
- return nvkm_client_init(priv);
+ struct nvkm_client *client = priv;
+ return nvkm_object_init(&client->object);
}
static int
nvkm_client_suspend(void *priv)
{
- return nvkm_client_fini(priv, true);
-}
-
-static void
-nvkm_client_driver_fini(void *priv)
-{
struct nvkm_client *client = priv;
- nvkm_client_del(&client);
+ return nvkm_object_fini(&client->object, true);
}
static int
@@ -108,23 +103,14 @@ static int
nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
const char *dbg, void **ppriv)
{
- struct nvkm_client *client;
- int ret;
-
- ret = nvkm_client_new(name, device, cfg, dbg, &client);
- *ppriv = client;
- if (ret)
- return ret;
-
- client->ntfy = nvkm_client_ntfy;
- return 0;
+ return nvkm_client_new(name, device, cfg, dbg, nvkm_client_ntfy,
+ (struct nvkm_client **)ppriv);
}
const struct nvif_driver
nvif_driver_nvkm = {
.name = "nvkm",
.init = nvkm_client_driver_init,
- .fini = nvkm_client_driver_fini,
.suspend = nvkm_client_suspend,
.resume = nvkm_client_resume,
.ioctl = nvkm_client_ioctl,
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index a0a9704cfe2b..1fefc93af1d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -60,6 +60,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo;
struct reservation_object *robj = attach->dmabuf->resv;
u32 flags = 0;
@@ -68,7 +69,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
flags = TTM_PL_FLAG_TT;
ww_mutex_lock(&robj->lock, NULL);
- ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0,
+ ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
sg, robj, &nvbo);
ww_mutex_unlock(&robj->lock);
if (ret)
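[ Editor's note: this is the recurring theme of the series — nouveau_drm now
embeds a master client, so device state is reached through drm->client.device
throughout, and buffer objects are allocated against that client instead of
the drm_device. Sketch of the new call shape; parameter names illustrative: ]

	ret = nouveau_bo_new(&drm->client, size, align, flags,
			     tile_mode, tile_flags, sg, robj, &nvbo);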
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index db35ab5883ac..b7ab268f7d6f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -24,10 +24,10 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
}
static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nvkm_mem *node = mem->mm_node;
+ struct nvkm_mem *node = reg->mm_node;
if (ttm->sg) {
node->sg = ttm->sg;
@@ -36,7 +36,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
}
- node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+ node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
nvkm_vm_map(&node->vma[0], node);
nvbe->node = node;
@@ -58,10 +58,10 @@ static struct ttm_backend_func nv04_sgdma_backend = {
};
static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nvkm_mem *node = mem->mm_node;
+ struct nvkm_mem *node = reg->mm_node;
/* noop: bound in move_notify() */
if (ttm->sg) {
@@ -71,7 +71,7 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
}
- node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+ node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
return 0;
}
@@ -100,7 +100,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
if (!nvbe)
return NULL;
- if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
nvbe->ttm.ttm.func = &nv04_sgdma_backend;
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index a6dbe8258040..13e5cc5f07fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -36,7 +36,7 @@ static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_fb *fb = nvxx_fb(&drm->device);
+ struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
man->priv = fb;
return 0;
}
@@ -64,53 +64,53 @@ nvkm_mem_node_cleanup(struct nvkm_mem *node)
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *reg)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
- nvkm_mem_node_cleanup(mem->mm_node);
- ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
+ struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
+ nvkm_mem_node_cleanup(reg->mm_node);
+ ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
}
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *reg)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
+ struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_mem *node;
u32 size_nc = 0;
int ret;
- if (drm->device.info.ram_size == 0)
+ if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
size_nc = 1 << nvbo->page_shift;
- ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
- mem->page_alignment << PAGE_SHIFT, size_nc,
+ ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
+ reg->page_alignment << PAGE_SHIFT, size_nc,
(nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) {
- mem->mm_node = NULL;
+ reg->mm_node = NULL;
return (ret == -ENOSPC) ? 0 : ret;
}
node->page_shift = nvbo->page_shift;
- mem->mm_node = node;
- mem->start = node->offset >> PAGE_SHIFT;
+ reg->mm_node = node;
+ reg->start = node->offset >> PAGE_SHIFT;
return 0;
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
- nouveau_vram_manager_init,
- nouveau_vram_manager_fini,
- nouveau_vram_manager_new,
- nouveau_vram_manager_del,
+ .init = nouveau_vram_manager_init,
+ .takedown = nouveau_vram_manager_fini,
+ .get_node = nouveau_vram_manager_new,
+ .put_node = nouveau_vram_manager_del,
};
static int
@@ -127,18 +127,18 @@ nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *reg)
{
- nvkm_mem_node_cleanup(mem->mm_node);
- kfree(mem->mm_node);
- mem->mm_node = NULL;
+ nvkm_mem_node_cleanup(reg->mm_node);
+ kfree(reg->mm_node);
+ reg->mm_node = NULL;
}
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -150,7 +150,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
- switch (drm->device.info.family) {
+ switch (drm->client.device.info.family) {
case NV_DEVICE_INFO_V0_TNT:
case NV_DEVICE_INFO_V0_CELSIUS:
case NV_DEVICE_INFO_V0_KELVIN:
@@ -158,7 +158,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
case NV_DEVICE_INFO_V0_CURIE:
break;
case NV_DEVICE_INFO_V0_TESLA:
- if (drm->device.info.chipset != 0x50)
+ if (drm->client.device.info.chipset != 0x50)
node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
break;
case NV_DEVICE_INFO_V0_FERMI:
@@ -169,12 +169,12 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
break;
default:
NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
- drm->device.info.family);
+ drm->client.device.info.family);
break;
}
- mem->mm_node = node;
- mem->start = 0;
+ reg->mm_node = node;
+ reg->start = 0;
return 0;
}
@@ -184,11 +184,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
}
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
- nouveau_gart_manager_init,
- nouveau_gart_manager_fini,
- nouveau_gart_manager_new,
- nouveau_gart_manager_del,
- nouveau_gart_manager_debug
+ .init = nouveau_gart_manager_init,
+ .takedown = nouveau_gart_manager_fini,
+ .get_node = nouveau_gart_manager_new,
+ .put_node = nouveau_gart_manager_del,
+ .debug = nouveau_gart_manager_debug
};
/*XXX*/
@@ -197,7 +197,7 @@ static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
+ struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
struct nv04_mmu *priv = (void *)mmu;
struct nvkm_vm *vm = NULL;
nvkm_vm_ref(priv->vm, &vm, NULL);
@@ -215,20 +215,20 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
}
static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
- struct nvkm_mem *node = mem->mm_node;
+ struct nvkm_mem *node = reg->mm_node;
if (node->vma[0].node)
nvkm_vm_put(&node->vma[0]);
- kfree(mem->mm_node);
- mem->mm_node = NULL;
+ kfree(reg->mm_node);
+ reg->mm_node = NULL;
}
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *reg)
{
struct nvkm_mem *node;
int ret;
@@ -239,15 +239,15 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
- ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+ ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
NV_MEM_ACCESS_RW, &node->vma[0]);
if (ret) {
kfree(node);
return ret;
}
- mem->mm_node = node;
- mem->start = node->vma[0].offset >> PAGE_SHIFT;
+ reg->mm_node = node;
+ reg->start = node->vma[0].offset >> PAGE_SHIFT;
return 0;
}
@@ -257,11 +257,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
}
const struct ttm_mem_type_manager_func nv04_gart_manager = {
- nv04_gart_manager_init,
- nv04_gart_manager_fini,
- nv04_gart_manager_new,
- nv04_gart_manager_del,
- nv04_gart_manager_debug
+ .init = nv04_gart_manager_init,
+ .takedown = nv04_gart_manager_fini,
+ .get_node = nv04_gart_manager_new,
+ .put_node = nv04_gart_manager_del,
+ .debug = nv04_gart_manager_debug
};
int
@@ -339,7 +339,7 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
- struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nvkm_pci *pci = device->pci;
struct drm_device *dev = drm->dev;
u8 bits;
@@ -352,8 +352,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma;
}
- bits = nvxx_mmu(&drm->device)->dma_bits;
- if (nvxx_device(&drm->device)->func->pci) {
+ bits = nvxx_mmu(&drm->client.device)->dma_bits;
+ if (nvxx_device(&drm->client.device)->func->pci) {
if (drm->agp.bridge)
bits = 32;
} else if (device->func->tegra) {
@@ -396,7 +396,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
/* VRAM init */
- drm->gem.vram_available = drm->device.info.ram_user;
+ drm->gem.vram_available = drm->client.device.info.ram_user;
arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
device->func->resource_size(device, 1));
@@ -413,7 +413,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* GART init */
if (!drm->agp.bridge) {
- drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
+ drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
} else {
drm->gem.gart_available = drm->agp.size;
}
@@ -433,7 +433,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
- struct nvkm_device *device = nvxx_device(&drm->device);
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
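[ Editor's note: the three manager-func tables above switch from positional
to designated initializers. A self-contained sketch of why that matters;
the struct and functions are hypothetical: ]

	struct ops {
		int  (*init)(void);
		void (*takedown)(void);
	};

	static int  my_init(void)     { return 0; }
	static void my_takedown(void) { }

	/* positional form breaks silently if fields are added or reordered;
	 * designated form keeps each callback bound to the right slot */
	static const struct ops example = {
		.init     = my_init,
		.takedown = my_takedown,
	};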
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 1fba38622744..afbdbed1a690 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -103,7 +103,7 @@ usif_notify(const void *header, u32 length, const void *data, u32 size)
}
break;
default:
- BUG_ON(1);
+ BUG();
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c6a180a0c284..eef22c6b9665 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -13,13 +13,13 @@ static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
struct nouveau_drm *drm = nouveau_drm(priv);
- struct nvif_object *device = &drm->device.object;
+ struct nvif_object *device = &drm->client.device.object;
- if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
- drm->device.info.chipset >= 0x4c)
+ if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
+ drm->client.device.info.chipset >= 0x4c)
nvif_wr32(device, 0x088060, state);
else
- if (drm->device.info.chipset >= 0x40)
+ if (drm->client.device.info.chipset >= 0x40)
nvif_wr32(device, 0x088054, state);
else
nvif_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 6a2b187e3c3b..01731dbeb3d8 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -136,7 +136,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 79bc01111351..6477b7069e14 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,9 +76,9 @@ nv17_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- u32 start = mem->start * PAGE_SIZE;
- u32 limit = start + mem->size - 1;
+ struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+ u32 start = reg->start * PAGE_SIZE;
+ u32 limit = start + reg->size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -129,7 +129,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 32097fd615fd..0b4440ffbeae 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -447,18 +447,18 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
args.base.target = NV_DMA_V0_TARGET_VRAM;
args.base.access = NV_DMA_V0_ACCESS_RDWR;
args.base.start = 0;
- args.base.limit = drm->device.info.ram_user - 1;
+ args.base.limit = drm->client.device.info.ram_user - 1;
- if (drm->device.info.chipset < 0x80) {
+ if (drm->client.device.info.chipset < 0x80) {
args.nv50.part = NV50_DMA_V0_PART_256;
argc += sizeof(args.nv50);
} else
- if (drm->device.info.chipset < 0xc0) {
+ if (drm->client.device.info.chipset < 0xc0) {
args.nv50.part = NV50_DMA_V0_PART_256;
args.nv50.kind = kind;
argc += sizeof(args.nv50);
} else
- if (drm->device.info.chipset < 0xd0) {
+ if (drm->client.device.info.chipset < 0xd0) {
args.gf100.kind = kind;
argc += sizeof(args.gf100);
} else {
@@ -848,7 +848,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
if (asyw->image.kind) {
asyw->image.layout = 0;
- if (drm->device.info.chipset >= 0xc0)
+ if (drm->client.device.info.chipset >= 0xc0)
asyw->image.block = fb->nvbo->tile_mode >> 4;
else
asyw->image.block = fb->nvbo->tile_mode;
@@ -1153,7 +1153,7 @@ nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
if (asyw->state.fb->width != asyw->state.fb->height)
return -EINVAL;
- switch (asyw->state.fb->pixel_format) {
+ switch (asyw->state.fb->format->format) {
case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
default:
WARN_ON(1);
@@ -1397,7 +1397,7 @@ nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
- if (nvif_msec(&drm->device, 2000ULL,
+ if (nvif_msec(&drm->client.device, 2000ULL,
u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
if ((data & 0xc0000000) == 0x40000000)
break;
@@ -1418,12 +1418,10 @@ static int
nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
- const u32 format = asyw->state.fb->pixel_format;
- const struct drm_format_info *info;
+ const struct drm_framebuffer *fb = asyw->state.fb;
int ret;
- info = drm_format_info(format);
- if (!info || !info->depth)
+ if (!fb->format->depth)
return -EINVAL;
ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
@@ -1433,14 +1431,14 @@ nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
if (ret)
return ret;
- asyh->base.depth = info->depth;
- asyh->base.cpp = info->cpp[0];
+ asyh->base.depth = fb->format->depth;
+ asyh->base.cpp = fb->format->cpp[0];
asyh->base.x = asyw->state.src.x1 >> 16;
asyh->base.y = asyw->state.src.y1 >> 16;
asyh->base.w = asyw->state.fb->width;
asyh->base.h = asyw->state.fb->height;
- switch (format) {
+ switch (fb->format->format) {
case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
case DRM_FORMAT_XRGB1555 :
@@ -1524,7 +1522,7 @@ nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
return ret;
}
- ret = nv50_base_create(&drm->device, disp->disp, base->id,
+ ret = nv50_base_create(&drm->client.device, disp->disp, base->id,
disp->sync->bo.offset, &base->chan);
if (ret)
return ret;
@@ -2396,7 +2394,7 @@ static int
nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->device;
+ struct nvif_device *device = &drm->client.device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
struct nv50_base *base;
@@ -2430,7 +2428,7 @@ nv50_head_create(struct drm_device *dev, int index)
drm_crtc_helper_add(crtc, &nv50_head_help);
drm_mode_crtc_set_gamma_size(crtc, 256);
- ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 8192, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
@@ -2669,7 +2667,7 @@ static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
@@ -3419,7 +3417,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
mstm->outp = outp;
mstm->mgr.cbs = &nv50_mstm;
- ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev->dev, aux, aux_max,
+ ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
max_payloads, conn_base_id);
if (ret)
return ret;
@@ -3625,7 +3623,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
nv50_audio_enable(encoder, mode);
break;
default:
- BUG_ON(1);
+ BUG();
break;
}
@@ -3659,7 +3657,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type, ret;
@@ -3798,7 +3796,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
proto = 0x0;
break;
default:
- BUG_ON(1);
+ BUG();
break;
}
@@ -3844,7 +3842,7 @@ static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+ struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
struct i2c_adapter *ddc;
@@ -3917,7 +3915,7 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
evo_data(push, 0x00000000);
nouveau_bo_wr32(disp->sync, 0, 0x00000000);
evo_kick(push, core);
- if (nvif_msec(&drm->device, 2000ULL,
+ if (nvif_msec(&drm->client.device, 2000ULL,
if (nouveau_bo_rd32(disp->sync, 0))
break;
usleep_range(1, 2);
@@ -4435,7 +4433,7 @@ module_param_named(atomic, nouveau_atomic, int, 0400);
int
nv50_display_create(struct drm_device *dev)
{
- struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nvif_device *device = &nouveau_drm(dev)->client.device;
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *tmp;
@@ -4459,7 +4457,7 @@ nv50_display_create(struct drm_device *dev)
dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
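[ Editor's note: drm_framebuffer caches its format description as of this
cycle, which is why the hunks above replace drm_format_info() lookups on
fb->pixel_format with direct fb->format reads. Caller-side sketch: ]

	const struct drm_framebuffer *fb = asyw->state.fb;
	/* no lookup and no NULL check needed; the core fills fb->format */
	u8  depth  = fb->format->depth;
	u8  cpp    = fb->format->cpp[0];
	u32 fourcc = fb->format->format;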
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f68c7054fd53..a369d978e267 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,9 +37,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- u32 start = mem->start * PAGE_SIZE;
- u32 limit = start + mem->size - 1;
+ struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+ u32 start = reg->start * PAGE_SIZE;
+ u32 limit = start + reg->size - 1;
int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -82,7 +82,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index f0b322bec7df..bd7a8a1e4ad9 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -197,7 +197,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
int
nv84_fence_create(struct nouveau_drm *drm)
{
- struct nvkm_fifo *fifo = nvxx_fifo(&drm->device);
+ struct nvkm_fifo *fifo = nvxx_fifo(&drm->client.device);
struct nv84_fence_priv *priv;
u32 domain;
int ret;
@@ -219,14 +219,14 @@ nv84_fence_create(struct nouveau_drm *drm)
mutex_init(&priv->mutex);
/* Use VRAM if there is any ; otherwise fallback to system memory */
- domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
+ domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
/*
* fences created in sysmem must be non-cached or we
* will lose CPU/GPU coherency!
*/
TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
- ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, domain, 0,
- 0, NULL, NULL, &priv->bo);
+ ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
+ domain, 0, 0, NULL, NULL, &priv->bo);
if (ret == 0) {
ret = nouveau_bo_pin(priv->bo, domain, false);
if (ret == 0) {
@@ -239,7 +239,7 @@ nv84_fence_create(struct nouveau_drm *drm)
}
if (ret == 0)
- ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
+ ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
0, NULL, NULL, &priv->bo_gart);
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index ff8ed3a04d06..067b5e9f5ec1 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -1,4 +1,5 @@
nvif-y := nvif/object.o
nvif-y += nvif/client.o
nvif-y += nvif/device.o
+nvif-y += nvif/driver.o
nvif-y += nvif/notify.o
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 29c20dfd894d..12db54965c20 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -26,6 +26,9 @@
#include <nvif/driver.h>
#include <nvif/ioctl.h>
+#include <nvif/class.h>
+#include <nvif/if0000.h>
+
int
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
{
@@ -47,37 +50,29 @@ nvif_client_resume(struct nvif_client *client)
void
nvif_client_fini(struct nvif_client *client)
{
+ nvif_object_fini(&client->object);
if (client->driver) {
- client->driver->fini(client->object.priv);
+ if (client->driver->fini)
+ client->driver->fini(client->object.priv);
client->driver = NULL;
- client->object.client = NULL;
- nvif_object_fini(&client->object);
}
}
-static const struct nvif_driver *
-nvif_drivers[] = {
-#ifdef __KERNEL__
- &nvif_driver_nvkm,
-#else
- &nvif_driver_drm,
- &nvif_driver_lib,
- &nvif_driver_null,
-#endif
- NULL
-};
-
int
-nvif_client_init(const char *driver, const char *name, u64 device,
- const char *cfg, const char *dbg, struct nvif_client *client)
+nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
+ struct nvif_client *client)
{
+ struct nvif_client_v0 args = { .device = device };
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_nop_v0 nop;
- } args = {};
- int ret, i;
+ } nop = {};
+ int ret;
- ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object);
+ strncpy(args.name, name, sizeof(args.name));
+ ret = nvif_object_init(parent != client ? &parent->object : NULL,
+ 0, NVIF_CLASS_CLIENT, &args, sizeof(args),
+ &client->object);
if (ret)
return ret;
@@ -85,19 +80,11 @@ nvif_client_init(const char *driver, const char *name, u64 device,
client->object.handle = ~0;
client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
client->super = true;
-
- for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
- if (!driver || !strcmp(client->driver->name, driver)) {
- ret = client->driver->init(name, device, cfg, dbg,
- &client->object.priv);
- if (!ret || driver)
- break;
- }
- }
+ client->driver = parent->driver;
if (ret == 0) {
- ret = nvif_client_ioctl(client, &args, sizeof(args));
- client->version = args.nop.version;
+ ret = nvif_client_ioctl(client, &nop, sizeof(nop));
+ client->version = nop.nop.version;
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.c b/drivers/gpu/drm/nouveau/nvif/driver.c
new file mode 100644
index 000000000000..701330956e33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/driver.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <nvif/driver.h>
+#include <nvif/client.h>
+
+static const struct nvif_driver *
+nvif_driver[] = {
+#ifdef __KERNEL__
+ &nvif_driver_nvkm,
+#else
+ &nvif_driver_drm,
+ &nvif_driver_lib,
+ &nvif_driver_null,
+#endif
+ NULL
+};
+
+int
+nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
+ const char *name, u64 device, struct nvif_client *client)
+{
+ int ret = -EINVAL, i;
+
+ for (i = 0; (client->driver = nvif_driver[i]); i++) {
+ if (!drv || !strcmp(client->driver->name, drv)) {
+ ret = client->driver->init(name, device, cfg, dbg,
+ &client->object.priv);
+ if (ret == 0)
+ break;
+ client->driver->fini(client->object.priv);
+ }
+ }
+
+ if (ret == 0)
+ ret = nvif_client_init(client, name, device, client);
+ return ret;
+}
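[ Editor's note: nvif_driver_init() factors the old in-client probe loop into
its own translation unit: walk a NULL-terminated table of backends, filter by
name when one is requested, and stop at the first backend whose init
succeeds. Generic sketch with hypothetical names: ]

	struct backend {
		const char *name;
		int (*init)(void);
	};

	extern const struct backend backend_a, backend_b;	/* placeholders */

	static const struct backend *backends[] = { &backend_a, &backend_b, NULL };

	static const struct backend *
	backend_probe(const char *want)
	{
		int i;
		for (i = 0; backends[i]; i++) {
			if (want && strcmp(backends[i]->name, want))
				continue;	/* not the requested backend */
			if (backends[i]->init() == 0)
				return backends[i];	/* first success wins */
		}
		return NULL;
	}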
diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild
index 2832147b676c..e664378f6eda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild
@@ -1,3 +1,4 @@
include $(src)/nvkm/core/Kbuild
+include $(src)/nvkm/falcon/Kbuild
include $(src)/nvkm/subdev/Kbuild
include $(src)/nvkm/engine/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index e1943910858e..0d3a896892b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -31,6 +31,43 @@
#include <nvif/if0000.h>
#include <nvif/unpack.h>
+static int
+nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ union {
+ struct nvif_client_v0 v0;
+ } *args = argv;
+ struct nvkm_client *client;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){
+ args->v0.name[sizeof(args->v0.name) - 1] = 0;
+ ret = nvkm_client_new(args->v0.name, args->v0.device, NULL,
+ NULL, oclass->client->ntfy, &client);
+ if (ret)
+ return ret;
+ } else
+ return ret;
+
+ client->object.client = oclass->client;
+ client->object.handle = oclass->handle;
+ client->object.route = oclass->route;
+ client->object.token = oclass->token;
+ client->object.object = oclass->object;
+ client->debug = oclass->client->debug;
+ *pobject = &client->object;
+ return 0;
+}
+
+const struct nvkm_sclass
+nvkm_uclient_sclass = {
+ .oclass = NVIF_CLASS_CLIENT,
+ .minver = 0,
+ .maxver = 0,
+ .ctor = nvkm_uclient_new,
+};
+
struct nvkm_client_notify {
struct nvkm_client *client;
struct nvkm_notify n;
@@ -138,17 +175,30 @@ nvkm_client_notify_new(struct nvkm_object *object,
return ret;
}
+static const struct nvkm_object_func nvkm_client;
+struct nvkm_client *
+nvkm_client_search(struct nvkm_client *client, u64 handle)
+{
+ struct nvkm_object *object;
+
+ object = nvkm_object_search(client, handle, &nvkm_client);
+ if (IS_ERR(object))
+ return (void *)object;
+
+ return nvkm_client(object);
+}
+
static int
-nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
+nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size)
{
union {
- struct nv_client_devlist_v0 v0;
+ struct nvif_client_devlist_v0 v0;
} *args = data;
int ret = -ENOSYS;
- nvif_ioctl(object, "client devlist size %d\n", size);
+ nvif_ioctl(&client->object, "client devlist size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- nvif_ioctl(object, "client devlist vers %d count %d\n",
+ nvif_ioctl(&client->object, "client devlist vers %d count %d\n",
args->v0.version, args->v0.count);
if (size == sizeof(args->v0.device[0]) * args->v0.count) {
ret = nvkm_device_list(args->v0.device, args->v0.count);
@@ -167,9 +217,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
static int
nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
+ struct nvkm_client *client = nvkm_client(object);
switch (mthd) {
- case NV_CLIENT_DEVLIST:
- return nvkm_client_mthd_devlist(object, data, size);
+ case NVIF_CLIENT_V0_DEVLIST:
+ return nvkm_client_mthd_devlist(client, data, size);
default:
break;
}
@@ -190,7 +241,8 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
const struct nvkm_sclass *sclass;
switch (index) {
- case 0: sclass = &nvkm_udevice_sclass; break;
+ case 0: sclass = &nvkm_uclient_sclass; break;
+ case 1: sclass = &nvkm_udevice_sclass; break;
default:
return -EINVAL;
}
@@ -200,110 +252,54 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
return 0;
}
-static const struct nvkm_object_func
-nvkm_client_object_func = {
- .mthd = nvkm_client_mthd,
- .sclass = nvkm_client_child_get,
-};
-
-void
-nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object)
-{
- if (!RB_EMPTY_NODE(&object->node))
- rb_erase(&object->node, &client->objroot);
-}
-
-bool
-nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
-{
- struct rb_node **ptr = &client->objroot.rb_node;
- struct rb_node *parent = NULL;
-
- while (*ptr) {
- struct nvkm_object *this =
- container_of(*ptr, typeof(*this), node);
- parent = *ptr;
- if (object->object < this->object)
- ptr = &parent->rb_left;
- else
- if (object->object > this->object)
- ptr = &parent->rb_right;
- else
- return false;
- }
-
- rb_link_node(&object->node, parent, ptr);
- rb_insert_color(&object->node, &client->objroot);
- return true;
-}
-
-struct nvkm_object *
-nvkm_client_search(struct nvkm_client *client, u64 handle)
-{
- struct rb_node *node = client->objroot.rb_node;
- while (node) {
- struct nvkm_object *object =
- container_of(node, typeof(*object), node);
- if (handle < object->object)
- node = node->rb_left;
- else
- if (handle > object->object)
- node = node->rb_right;
- else
- return object;
- }
- return NULL;
-}
-
-int
-nvkm_client_fini(struct nvkm_client *client, bool suspend)
+static int
+nvkm_client_fini(struct nvkm_object *object, bool suspend)
{
- struct nvkm_object *object = &client->object;
+ struct nvkm_client *client = nvkm_client(object);
const char *name[2] = { "fini", "suspend" };
int i;
nvif_debug(object, "%s notify\n", name[suspend]);
for (i = 0; i < ARRAY_SIZE(client->notify); i++)
nvkm_client_notify_put(client, i);
- return nvkm_object_fini(&client->object, suspend);
-}
-
-int
-nvkm_client_init(struct nvkm_client *client)
-{
- return nvkm_object_init(&client->object);
+ return 0;
}
-void
-nvkm_client_del(struct nvkm_client **pclient)
+static void *
+nvkm_client_dtor(struct nvkm_object *object)
{
- struct nvkm_client *client = *pclient;
+ struct nvkm_client *client = nvkm_client(object);
int i;
- if (client) {
- nvkm_client_fini(client, false);
- for (i = 0; i < ARRAY_SIZE(client->notify); i++)
- nvkm_client_notify_del(client, i);
- nvkm_object_dtor(&client->object);
- kfree(*pclient);
- *pclient = NULL;
- }
+ for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+ nvkm_client_notify_del(client, i);
+ return client;
}
+static const struct nvkm_object_func
+nvkm_client = {
+ .dtor = nvkm_client_dtor,
+ .fini = nvkm_client_fini,
+ .mthd = nvkm_client_mthd,
+ .sclass = nvkm_client_child_get,
+};
+
int
nvkm_client_new(const char *name, u64 device, const char *cfg,
- const char *dbg, struct nvkm_client **pclient)
+ const char *dbg,
+ int (*ntfy)(const void *, u32, const void *, u32),
+ struct nvkm_client **pclient)
{
- struct nvkm_oclass oclass = {};
+ struct nvkm_oclass oclass = { .base = nvkm_uclient_sclass };
struct nvkm_client *client;
if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
return -ENOMEM;
oclass.client = client;
- nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object);
+ nvkm_object_ctor(&nvkm_client, &oclass, &client->object);
snprintf(client->name, sizeof(client->name), "%s", name);
client->device = device;
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
- client->dmaroot = RB_ROOT;
+ client->ntfy = ntfy;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index ee8e5831fe37..b6c916954a10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -27,6 +27,14 @@
#include <subdev/fb.h>
+bool
+nvkm_engine_chsw_load(struct nvkm_engine *engine)
+{
+ if (engine->func->chsw_load)
+ return engine->func->chsw_load(engine);
+ return false;
+}
+
void
nvkm_engine_unref(struct nvkm_engine **pengine)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index b0db51847c36..be19bbe56bba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -29,7 +29,8 @@
#include <nvif/ioctl.h>
static int
-nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_nop(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_nop_v0 v0;
@@ -46,7 +47,8 @@ nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_sclass(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_sclass_v0 v0;
@@ -78,12 +80,12 @@ nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
+nvkm_ioctl_new(struct nvkm_client *client,
+ struct nvkm_object *parent, void *data, u32 size)
{
union {
struct nvif_ioctl_new_v0 v0;
} *args = data;
- struct nvkm_client *client = parent->client;
struct nvkm_object *object = NULL;
struct nvkm_oclass oclass;
int ret = -ENOSYS, i = 0;
@@ -104,9 +106,11 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
do {
memset(&oclass, 0x00, sizeof(oclass));
- oclass.client = client;
oclass.handle = args->v0.handle;
+ oclass.route = args->v0.route;
+ oclass.token = args->v0.token;
oclass.object = args->v0.object;
+ oclass.client = client;
oclass.parent = parent;
ret = parent->func->sclass(parent, i++, &oclass);
if (ret)
@@ -125,10 +129,7 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
ret = nvkm_object_init(object);
if (ret == 0) {
list_add(&object->head, &parent->tree);
- object->route = args->v0.route;
- object->token = args->v0.token;
- object->object = args->v0.object;
- if (nvkm_client_insert(client, object)) {
+ if (nvkm_object_insert(object)) {
client->data = object;
return 0;
}
@@ -142,7 +143,8 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
}
static int
-nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_del(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_del none;
@@ -156,11 +158,12 @@ nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
nvkm_object_del(&object);
}
- return ret;
+ return ret ? ret : 1;
}
static int
-nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_mthd(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_mthd_v0 v0;
@@ -179,7 +182,8 @@ nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
static int
-nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_rd(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_rd_v0 v0;
@@ -218,7 +222,8 @@ nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_wr(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_wr_v0 v0;
@@ -246,7 +251,8 @@ nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_map(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_map_v0 v0;
@@ -264,7 +270,8 @@ nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_unmap(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_unmap none;
@@ -280,7 +287,8 @@ nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_ntfy_new(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_ntfy_new_v0 v0;
@@ -306,9 +314,9 @@ nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_ntfy_del(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_client *client = object->client;
union {
struct nvif_ioctl_ntfy_del_v0 v0;
} *args = data;
@@ -325,9 +333,9 @@ nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_ntfy_get(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_client *client = object->client;
union {
struct nvif_ioctl_ntfy_get_v0 v0;
} *args = data;
@@ -344,9 +352,9 @@ nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
}
static int
-nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_ntfy_put(struct nvkm_client *client,
+ struct nvkm_object *object, void *data, u32 size)
{
- struct nvkm_client *client = object->client;
union {
struct nvif_ioctl_ntfy_put_v0 v0;
} *args = data;
@@ -364,7 +372,7 @@ nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
static struct {
int version;
- int (*func)(struct nvkm_object *, void *, u32);
+ int (*func)(struct nvkm_client *, struct nvkm_object *, void *, u32);
}
nvkm_ioctl_v0[] = {
{ 0x00, nvkm_ioctl_nop },
@@ -389,13 +397,10 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
struct nvkm_object *object;
int ret;
- if (handle)
- object = nvkm_client_search(client, handle);
- else
- object = &client->object;
- if (unlikely(!object)) {
+ object = nvkm_object_search(client, handle, NULL);
+ if (IS_ERR(object)) {
nvif_ioctl(&client->object, "object not found\n");
- return -ENOENT;
+ return PTR_ERR(object);
}
if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
@@ -407,7 +412,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
if (nvkm_ioctl_v0[type].version == 0)
- ret = nvkm_ioctl_v0[type].func(object, data, size);
+ ret = nvkm_ioctl_v0[type].func(client, object, data, size);
}
return ret;
@@ -436,12 +441,13 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
&args->v0.route, &args->v0.token);
}
- nvif_ioctl(object, "return %d\n", ret);
- if (hack) {
- *hack = client->data;
- client->data = NULL;
+ if (ret != 1) {
+ nvif_ioctl(object, "return %d\n", ret);
+ if (hack) {
+ *hack = client->data;
+ client->data = NULL;
+ }
}
- client->super = false;
return ret;
}
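[ Editor's note: the del path now returns 1 instead of 0 on success, and
nvkm_ioctl() checks for that sentinel — apparently so the post-call
bookkeeping (the debug print and the client->data handover) is skipped for an
object that has just been freed. Sketch of the pattern, hedged accordingly: ]

	ret = handler(client, object, data, size);
	if (ret != 1)
		post_process(object);	/* only safe while the object exists */
	return ret;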
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 09a1eee8fd33..fd19d652a7ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -147,6 +147,7 @@ nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
if (!this)
return -ENOMEM;
+ this->next = NULL;
this->type = type;
list_del(&this->fl_entry);
*pnode = this;
@@ -225,6 +226,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
if (!this)
return -ENOMEM;
+ this->next = NULL;
this->type = type;
list_del(&this->fl_entry);
*pnode = this;
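[ Editor's note: nodes handed out by nvkm_mm_head()/nvkm_mm_tail() are
recycled from the free list, and callers chain multi-node allocations
through ->next; the intent of the added line appears to be keeping a stale
link from a node's previous life from leaking to its new owner. Fragment: ]

	this->next = NULL;	/* recycled node must not keep old chain linkage */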
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 67aa7223dcd7..89d2e9da11c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -25,6 +25,65 @@
#include <core/client.h>
#include <core/engine.h>
+struct nvkm_object *
+nvkm_object_search(struct nvkm_client *client, u64 handle,
+ const struct nvkm_object_func *func)
+{
+ struct nvkm_object *object;
+
+ if (handle) {
+ struct rb_node *node = client->objroot.rb_node;
+ while (node) {
+ object = rb_entry(node, typeof(*object), node);
+ if (handle < object->object)
+ node = node->rb_left;
+ else
+ if (handle > object->object)
+ node = node->rb_right;
+ else
+ goto done;
+ }
+ return ERR_PTR(-ENOENT);
+ } else {
+ object = &client->object;
+ }
+
+done:
+ if (unlikely(func && object->func != func))
+ return ERR_PTR(-EINVAL);
+ return object;
+}
+
+void
+nvkm_object_remove(struct nvkm_object *object)
+{
+ if (!RB_EMPTY_NODE(&object->node))
+ rb_erase(&object->node, &object->client->objroot);
+}
+
+bool
+nvkm_object_insert(struct nvkm_object *object)
+{
+ struct rb_node **ptr = &object->client->objroot.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*ptr) {
+ struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
+ parent = *ptr;
+ if (object->object < this->object)
+ ptr = &parent->rb_left;
+ else
+ if (object->object > this->object)
+ ptr = &parent->rb_right;
+ else
+ return false;
+ }
+
+ rb_link_node(&object->node, parent, ptr);
+ rb_insert_color(&object->node, &object->client->objroot);
+ return true;
+}
+
int
nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
@@ -214,7 +273,7 @@ nvkm_object_del(struct nvkm_object **pobject)
struct nvkm_object *object = *pobject;
if (object && !WARN_ON(!object->func)) {
*pobject = nvkm_object_dtor(object);
- nvkm_client_remove(object->client, object);
+ nvkm_object_remove(object);
list_del(&object->head);
kfree(*pobject);
*pobject = NULL;
@@ -230,6 +289,9 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
object->engine = nvkm_engine_ref(oclass->engine);
object->oclass = oclass->base.oclass;
object->handle = oclass->handle;
+ object->route = oclass->route;
+ object->token = oclass->token;
+ object->object = oclass->object;
INIT_LIST_HEAD(&object->head);
INIT_LIST_HEAD(&object->tree);
RB_CLEAR_NODE(&object->node);
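[ Editor's note: object lookup moves from the client into nvkm_object itself
and changes convention — handle 0 resolves to the client's own object,
failure is ERR_PTR-encoded rather than NULL, and the optional func argument
doubles as a type tag for wrappers such as nvkm_client_search() and
nvkm_dmaobj_search(). Caller-side sketch, mirroring the ioctl.c hunk: ]

	struct nvkm_object *object;

	object = nvkm_object_search(client, handle, NULL);	/* any type */
	if (IS_ERR(object))
		return PTR_ERR(object);	/* -ENOENT: bad handle; -EINVAL: wrong type */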
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index cceda959b47c..273562dd6bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -993,7 +993,7 @@ nv92_chipset = {
.mc = g84_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
- .pci = g84_pci_new,
+ .pci = g92_pci_new,
.therm = g84_therm_new,
.timer = nv41_timer_new,
.volt = nv40_volt_new,
@@ -2138,6 +2138,7 @@ nv12b_chipset = {
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
.mmu = gf100_mmu_new,
+ .pmu = gm20b_pmu_new,
.secboot = gm20b_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 0a1381a84552..070ec5e18fdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -137,7 +137,6 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
- struct nvkm_device *device = root->disp->base.engine.subdev.device;
struct nvkm_client *client = oclass->client;
struct nvkm_dmaobj *dmaobj;
struct nv50_disp_dmac *chan;
@@ -153,9 +152,9 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
if (ret)
return ret;
- dmaobj = nvkm_dma_search(device->dma, client, push);
- if (!dmaobj)
- return -ENOENT;
+ dmaobj = nvkm_dmaobj_search(client, push);
+ if (IS_ERR(dmaobj))
+ return PTR_ERR(dmaobj);
if (dmaobj->limit - dmaobj->start != 0xfff)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 4510cb6e10a8..627b9ee1ddd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -39,13 +39,6 @@ g94_sor_loff(struct nvkm_output_dp *outp)
}
/*******************************************************************************
- * TMDS/LVDS
- ******************************************************************************/
-static const struct nvkm_output_func
-g94_sor_output_func = {
-};
-
-/*******************************************************************************
* DisplayPort
******************************************************************************/
u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
index f11ebdd16c77..11b7b8fd5dda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -28,24 +28,6 @@
#include <nvif/class.h>
-struct nvkm_dmaobj *
-nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
-{
- struct rb_node *node = client->dmaroot.rb_node;
- while (node) {
- struct nvkm_dmaobj *dmaobj =
- container_of(node, typeof(*dmaobj), rb);
- if (object < dmaobj->handle)
- node = node->rb_left;
- else
- if (object > dmaobj->handle)
- node = node->rb_right;
- else
- return dmaobj;
- }
- return NULL;
-}
-
static int
nvkm_dma_oclass_new(struct nvkm_device *device,
const struct nvkm_oclass *oclass, void *data, u32 size,
@@ -53,34 +35,12 @@ nvkm_dma_oclass_new(struct nvkm_device *device,
{
struct nvkm_dma *dma = nvkm_dma(oclass->engine);
struct nvkm_dmaobj *dmaobj = NULL;
- struct nvkm_client *client = oclass->client;
- struct rb_node **ptr = &client->dmaroot.rb_node;
- struct rb_node *parent = NULL;
int ret;
ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
if (dmaobj)
*pobject = &dmaobj->object;
- if (ret)
- return ret;
-
- dmaobj->handle = oclass->object;
-
- while (*ptr) {
- struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
- parent = *ptr;
- if (dmaobj->handle < obj->handle)
- ptr = &parent->rb_left;
- else
- if (dmaobj->handle > obj->handle)
- ptr = &parent->rb_right;
- else
- return -EEXIST;
- }
-
- rb_link_node(&dmaobj->rb, parent, ptr);
- rb_insert_color(&dmaobj->rb, &client->dmaroot);
- return 0;
+ return ret;
}
static const struct nvkm_device_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index 13c661b1ef14..d20cc0681a88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -31,6 +31,19 @@
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
+static const struct nvkm_object_func nvkm_dmaobj_func;
+struct nvkm_dmaobj *
+nvkm_dmaobj_search(struct nvkm_client *client, u64 handle)
+{
+ struct nvkm_object *object;
+
+ object = nvkm_object_search(client, handle, &nvkm_dmaobj_func);
+ if (IS_ERR(object))
+ return (void *)object;
+
+ return nvkm_dmaobj(object);
+}
+
static int
nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
int align, struct nvkm_gpuobj **pgpuobj)
@@ -42,10 +55,7 @@ nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
static void *
nvkm_dmaobj_dtor(struct nvkm_object *base)
{
- struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
- if (!RB_EMPTY_NODE(&dmaobj->rb))
- rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
- return dmaobj;
+ return nvkm_dmaobj(base);
}
static const struct nvkm_object_func
@@ -74,7 +84,6 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
dmaobj->func = func;
dmaobj->dma = dma;
- RB_CLEAR_NODE(&dmaobj->rb);
nvif_ioctl(parent, "create dma size %d\n", *psize);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 1c9682ae3a6b..660ca7aa95ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -32,6 +32,17 @@
#include <nvif/unpack.h>
void
+nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
+{
+ unsigned long flags;
+ if (WARN_ON(!fifo->func->recover_chan))
+ return;
+ spin_lock_irqsave(&fifo->lock, flags);
+ fifo->func->recover_chan(fifo, chid);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
return fifo->func->pause(fifo, flags);
@@ -55,19 +66,29 @@ nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
}
struct nvkm_fifo_chan *
-nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
{
struct nvkm_fifo_chan *chan;
- unsigned long flags;
- spin_lock_irqsave(&fifo->lock, flags);
list_for_each_entry(chan, &fifo->chan, head) {
if (chan->inst->addr == inst) {
list_del(&chan->head);
list_add(&chan->head, &fifo->chan);
- *rflags = flags;
return chan;
}
}
+ return NULL;
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+{
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ spin_lock_irqsave(&fifo->lock, flags);
+ if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
+ *rflags = flags;
+ return chan;
+ }
spin_unlock_irqrestore(&fifo->lock, flags);
return NULL;
}
@@ -90,9 +111,34 @@ nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
return NULL;
}
+void
+nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
+{
+ nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0);
+}
+
static int
-nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
- struct nvkm_notify *notify)
+nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
+{
+ struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ if (size == 0) {
+ notify->size = 0;
+ notify->types = 1;
+ notify->index = chan->chid;
+ return 0;
+ }
+ return -ENOSYS;
+}
+
+static const struct nvkm_event_func
+nvkm_fifo_kevent_func = {
+ .ctor = nvkm_fifo_kevent_ctor,
+};
+
+static int
+nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size,
+ struct nvkm_notify *notify)
{
if (size == 0) {
notify->size = 0;
@@ -104,10 +150,16 @@ nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
}
static const struct nvkm_event_func
-nvkm_fifo_event_func = {
- .ctor = nvkm_fifo_event_ctor,
+nvkm_fifo_cevent_func = {
+ .ctor = nvkm_fifo_cevent_ctor,
};
+void
+nvkm_fifo_cevent(struct nvkm_fifo *fifo)
+{
+ nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+}
+
static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
@@ -241,6 +293,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
void *data = fifo;
if (fifo->func->dtor)
data = fifo->func->dtor(fifo);
+ nvkm_event_fini(&fifo->kevent);
nvkm_event_fini(&fifo->cevent);
nvkm_event_fini(&fifo->uevent);
return data;
@@ -283,5 +336,9 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
return ret;
}
- return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
+ ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent);
+ if (ret)
+ return ret;
+
+ return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
}
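[ Editor's note: the channel lookup above is split into a *_locked body plus
a locking wrapper so the new recovery code, which already holds fifo->lock,
can share it. The wrapper deliberately returns with the lock still held on
success, handing the saved IRQ flags back through *rflags. Sketch with
hypothetical types: ]

	struct thing *find_locked(struct ctx *c, u64 key);	/* caller holds c->lock */

	struct thing *
	find(struct ctx *c, u64 key, unsigned long *rflags)
	{
		struct thing *t;
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		if ((t = find_locked(c, key))) {
			*rflags = flags;	/* success: lock stays held for caller */
			return t;
		}
		spin_unlock_irqrestore(&c->lock, flags);
		return NULL;
	}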
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index dc6d4678f228..fab760ae922f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -371,9 +371,9 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
/* allocate push buffer ctxdma instance */
if (push) {
- dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
- if (!dmaobj)
- return -ENOENT;
+ dmaobj = nvkm_dmaobj_search(client, push);
+ if (IS_ERR(dmaobj))
+ return PTR_ERR(dmaobj);
ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
&chan->push);
@@ -410,6 +410,6 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
base + user * chan->chid;
chan->size = user;
- nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+ nvkm_fifo_cevent(fifo);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 55dc415c5c08..d8019bdacd61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -29,5 +29,5 @@ struct nvkm_fifo_chan_oclass {
struct nvkm_sclass base;
};
-int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
+int gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index 15a992b3580a..61797c4dd07a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -30,12 +30,12 @@
#include <nvif/cl826e.h>
-int
+static int
g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
struct nvkm_event **pevent)
{
switch (type) {
- case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+ case NV826E_V0_NTFY_NON_STALL_INTERRUPT:
*pevent = &chan->fifo->uevent;
return 0;
default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index ec68ea9747d5..cd468ab1db12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -68,7 +68,14 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
}
nvkm_done(cur);
- target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;
+ switch (nvkm_memory_target(cur)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ mutex_unlock(&subdev->mutex);
+ WARN_ON(1);
+ return;
+ }
nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
(target << 28));
@@ -183,6 +190,7 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
if (engine != &fifo->base.engine)
fifo->recover.mask |= 1ULL << engine->subdev.index;
schedule_work(&fifo->recover.work);
+ nvkm_fifo_kevent(&fifo->base, chid);
}
static const struct nvkm_enum
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 38c0910722c0..3a24788c3185 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -27,11 +27,71 @@
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
+#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>
#include <nvif/class.h>
+struct gk104_fifo_engine_status {
+ bool busy;
+ bool faulted;
+ bool chsw;
+ bool save;
+ bool load;
+ struct {
+ bool tsg;
+ u32 id;
+ } prev, next, *chan;
+};
+
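+/* Decode an engine's status register (0x002640 + engn * 0x08), and
+ * determine which channel/TSG (if any) is resident on the engine,
+ * taking a pending context switch into account.
+ */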
+static void
+gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+ struct gk104_fifo_engine_status *status)
+{
+ struct nvkm_engine *engine = fifo->engine[engn].engine;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
+
+ status->busy = !!(stat & 0x80000000);
+ status->faulted = !!(stat & 0x40000000);
+ status->next.tsg = !!(stat & 0x10000000);
+ status->next.id = (stat & 0x0fff0000) >> 16;
+ status->chsw = !!(stat & 0x00008000);
+ status->save = !!(stat & 0x00004000);
+ status->load = !!(stat & 0x00002000);
+ status->prev.tsg = !!(stat & 0x00001000);
+ status->prev.id = (stat & 0x00000fff);
+ status->chan = NULL;
+
+ if (status->busy && status->chsw) {
+ if (status->load && status->save) {
+ if (engine && nvkm_engine_chsw_load(engine))
+ status->chan = &status->next;
+ else
+ status->chan = &status->prev;
+ } else
+ if (status->load) {
+ status->chan = &status->next;
+ } else {
+ status->chan = &status->prev;
+ }
+ } else
+ if (status->load) {
+ status->chan = &status->prev;
+ }
+
+ nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
+ "save %d load %d %sid %d%s-> %sid %d%s\n",
+ engn, status->busy, status->faulted,
+ status->chsw, status->save, status->load,
+ status->prev.tsg ? "tsg" : "ch", status->prev.id,
+ status->chan == &status->prev ? "*" : " ",
+ status->next.tsg ? "tsg" : "ch", status->next.id,
+ status->chan == &status->next ? "*" : " ");
+}
+
static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
const struct nvkm_fifo_chan_oclass **psclass)
@@ -83,10 +143,13 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
}
nvkm_done(mem);
- if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
- target = 0;
- else
- target = 3;
+ switch (nvkm_memory_target(mem)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
(target << 28));
@@ -149,31 +212,137 @@ gk104_fifo_recover_work(struct work_struct *w)
nvkm_mask(device, 0x002630, runm, 0x00000000);
}
+static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
+
static void
-gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
- struct gk104_fifo_chan *chan)
+gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- u32 chid = chan->base.chid;
- int engn;
+ const u32 runm = BIT(runl);
- nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
- nvkm_subdev_name[engine->subdev.index], chid);
assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.runm & runm)
+ return;
+ fifo->recover.runm |= runm;
- nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
- list_del_init(&chan->head);
- chan->killed = true;
+ /* Block runlist to prevent channel assignment(s) from changing. */
+ nvkm_mask(device, 0x002630, runm, runm);
- for (engn = 0; engn < fifo->engine_nr; engn++) {
- if (fifo->engine[engn].engine == engine) {
- fifo->recover.engm |= BIT(engn);
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
+ schedule_work(&fifo->recover.work);
+}
+
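+/* Kill a channel: drop it from runlist SW state, send the KILLED notify,
+ * disable it in hardware, and schedule recovery for its runlist and for
+ * any engine it is currently resident on.
+ */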
+static void
+gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
+ const u32 runl = (stat & 0x000f0000) >> 16;
+ const bool used = (stat & 0x00000001);
+ unsigned long engn, engm = fifo->runlist[runl].engm;
+ struct gk104_fifo_chan *chan;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (!used)
+ return;
+
+ /* Lookup SW state for channel, and mark it as dead. */
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ if (chan->base.chid == chid) {
+ list_del_init(&chan->head);
+ chan->killed = true;
+ nvkm_fifo_kevent(&fifo->base, chid);
break;
}
}
- fifo->recover.runm |= BIT(chan->runl);
+ /* Disable channel. */
+ nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
+ nvkm_warn(subdev, "channel %d: killed\n", chid);
+
+ /* Block channel assignments from changing during recovery. */
+ gk104_fifo_recover_runl(fifo, runl);
+
+ /* Schedule recovery for any engines the channel is on. */
+ for_each_set_bit(engn, &engm, fifo->engine_nr) {
+ struct gk104_fifo_engine_status status;
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (!status.chan || status.chan->id != chid)
+ continue;
+ gk104_fifo_recover_engn(fifo, engn);
+ }
+}
+
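+/* Recover an engine: block its runlist, kill the resident channel (if
+ * any), and trigger an MMU fault on the engine before scheduling the
+ * recovery work.
+ */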
+static void
+gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
+{
+ struct nvkm_engine *engine = fifo->engine[engn].engine;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runl = fifo->engine[engn].runl;
+ const u32 engm = BIT(engn);
+ struct gk104_fifo_engine_status status;
+ int mmui = -1;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.engm & engm)
+ return;
+ fifo->recover.engm |= engm;
+
+ /* Block channel assignments from changing during recovery. */
+ gk104_fifo_recover_runl(fifo, runl);
+
+ /* Determine which channel (if any) is currently on the engine. */
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (status.chan) {
+ /* The channel is no longer viable, kill it. */
+ gk104_fifo_recover_chan(&fifo->base, status.chan->id);
+ }
+
+ /* Determine MMU fault ID for the engine, if we're not being
+ * called from the fault handler already.
+ */
+ if (!status.faulted && engine) {
+ mmui = nvkm_top_fault_id(device, engine->subdev.index);
+ if (mmui < 0) {
+ const struct nvkm_enum *en = fifo->func->fault.engine;
+ for (; en && en->name; en++) {
+ if (en->data2 == engine->subdev.index) {
+ mmui = en->value;
+ break;
+ }
+ }
+ }
+ WARN_ON(mmui < 0);
+ }
+
+ /* Trigger a MMU fault for the engine.
+ *
+ * It's not entirely clear why this is needed, but nvgpu does something similar,
+ * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
+ */
+ if (mmui >= 0) {
+ nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);
+
+ /* Wait for fault to trigger. */
+ nvkm_msec(device, 2000,
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (status.faulted)
+ break;
+ );
+
+ /* Release MMU fault trigger, and ACK the fault. */
+ nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x00259c, BIT(mmui));
+ nvkm_wr32(device, 0x002100, 0x10000000);
+ }
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
schedule_work(&fifo->recover.work);
}
@@ -211,34 +380,30 @@ static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
- struct gk104_fifo_chan *chan;
- unsigned long flags;
+ unsigned long flags, engm = 0;
u32 engn;
+ /* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
+ * as MMU_FAULT cannot be triggered while it's pending.
+ */
spin_lock_irqsave(&fifo->base.lock, flags);
+ nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+
for (engn = 0; engn < fifo->engine_nr; engn++) {
- struct nvkm_engine *engine = fifo->engine[engn].engine;
- int runl = fifo->engine[engn].runl;
- u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
- u32 busy = (stat & 0x80000000);
- u32 next = (stat & 0x0fff0000) >> 16;
- u32 chsw = (stat & 0x00008000);
- u32 save = (stat & 0x00004000);
- u32 load = (stat & 0x00002000);
- u32 prev = (stat & 0x00000fff);
- u32 chid = load ? next : prev;
- (void)save;
-
- if (!busy || !chsw)
+ struct gk104_fifo_engine_status status;
+
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (!status.busy || !status.chsw)
continue;
- list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
- if (chan->base.chid == chid && engine) {
- gk104_fifo_recover(fifo, engine, chan);
- break;
- }
- }
+ engm |= BIT(engn);
}
+
+ for_each_set_bit(engn, &engm, fifo->engine_nr)
+ gk104_fifo_recover_engn(fifo, engn);
+
+ nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
spin_unlock_irqrestore(&fifo->base.lock, flags);
}
@@ -301,6 +466,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
struct nvkm_fifo_chan *chan;
unsigned long flags;
char gpcid[8] = "", en[16] = "";
+ int engn;
er = nvkm_enum_find(fifo->func->fault.reason, reason);
eu = nvkm_enum_find(fifo->func->fault.engine, unit);
@@ -342,7 +508,8 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
snprintf(en, sizeof(en), "%s", eu->name);
}
- chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12);
nvkm_error(subdev,
"%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
@@ -353,9 +520,23 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
(u64)inst << 12,
chan ? chan->object.client->name : "unknown");
- if (engine && chan)
- gk104_fifo_recover(fifo, engine, (void *)chan);
- nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+
+ /* Kill the channel that caused the fault. */
+ if (chan)
+ gk104_fifo_recover_chan(&fifo->base, chan->chid);
+
+ /* Channel recovery will probably have already done this for the
+ * correct engine(s), but just in case we can't find the channel
+ * information...
+ */
+ for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+ if (fifo->engine[engn].engine == engine) {
+ gk104_fifo_recover_engn(fifo, engn);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -716,6 +897,7 @@ gk104_fifo_ = {
.intr = gk104_fifo_intr,
.uevent_init = gk104_fifo_uevent_init,
.uevent_fini = gk104_fifo_uevent_fini,
+ .recover_chan = gk104_fifo_recover_chan,
.class_get = gk104_fifo_class_get,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index 12d964260a29..f9e0377d3d24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -32,6 +32,23 @@
#include <nvif/cl906f.h>
#include <nvif/unpack.h>
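+/* Map NV906F notify types to event sources; GF100+ channels expose both
+ * the non-stall interrupt and the new KILLED notification.
+ */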
+int
+gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
+ struct nvkm_event **pevent)
+{
+ switch (type) {
+ case NV906F_V0_NTFY_NON_STALL_INTERRUPT:
+ *pevent = &chan->fifo->uevent;
+ return 0;
+ case NV906F_V0_NTFY_KILLED:
+ *pevent = &chan->fifo->kevent;
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
static u32
gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
@@ -184,7 +201,7 @@ gf100_fifo_gpfifo_func = {
.dtor = gf100_fifo_gpfifo_dtor,
.init = gf100_fifo_gpfifo_init,
.fini = gf100_fifo_gpfifo_fini,
- .ntfy = g84_fifo_chan_ntfy,
+ .ntfy = gf100_fifo_chan_ntfy,
.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
.engine_init = gf100_fifo_gpfifo_engine_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index a2df4f3e7763..8abf6f8ef445 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -50,6 +50,7 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
) < 0) {
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
chan->base.chid, client->name);
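+ /* Preempt timed out; the channel is wedged, so schedule recovery. */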
+ nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
ret = -ETIMEDOUT;
}
mutex_unlock(&subdev->mutex);
@@ -213,7 +214,7 @@ gk104_fifo_gpfifo_func = {
.dtor = gk104_fifo_gpfifo_dtor,
.init = gk104_fifo_gpfifo_init,
.fini = gk104_fifo_gpfifo_fini,
- .ntfy = g84_fifo_chan_ntfy,
+ .ntfy = gf100_fifo_chan_ntfy,
.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gk104_fifo_gpfifo_engine_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index f6dfb37d9429..f889b13b5e41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -6,6 +6,12 @@
int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
int index, int nr, struct nvkm_fifo *);
void nvkm_fifo_uevent(struct nvkm_fifo *);
+void nvkm_fifo_cevent(struct nvkm_fifo *);
+void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
+void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst_locked(struct nvkm_fifo *, u64 inst);
struct nvkm_fifo_chan_oclass;
struct nvkm_fifo_func {
@@ -18,6 +24,7 @@ struct nvkm_fifo_func {
void (*start)(struct nvkm_fifo *, unsigned long *);
void (*uevent_init)(struct nvkm_fifo *);
void (*uevent_fini)(struct nvkm_fifo *);
+ void (*recover_chan)(struct nvkm_fifo *, int chid);
int (*class_get)(struct nvkm_fifo *, int index,
const struct nvkm_fifo_chan_oclass **);
const struct nvkm_fifo_chan_oclass *chan[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index 467065d1b4e6..cd8cf6f7024c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -25,6 +25,15 @@
#include <engine/fifo.h>
+static bool
+nvkm_gr_chsw_load(struct nvkm_engine *engine)
+{
+ struct nvkm_gr *gr = nvkm_gr(engine);
+ if (gr->func->chsw_load)
+ return gr->func->chsw_load(gr);
+ return false;
+}
+
static void
nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
{
@@ -106,6 +115,15 @@ nvkm_gr_init(struct nvkm_engine *engine)
return gr->func->init(gr);
}
+static int
+nvkm_gr_fini(struct nvkm_engine *engine, bool suspend)
+{
+ struct nvkm_gr *gr = nvkm_gr(engine);
+ if (gr->func->fini)
+ return gr->func->fini(gr, suspend);
+ return 0;
+}
+
static void *
nvkm_gr_dtor(struct nvkm_engine *engine)
{
@@ -120,8 +138,10 @@ nvkm_gr = {
.dtor = nvkm_gr_dtor,
.oneinit = nvkm_gr_oneinit,
.init = nvkm_gr_init,
+ .fini = nvkm_gr_fini,
.intr = nvkm_gr_intr,
.tile = nvkm_gr_tile,
+ .chsw_load = nvkm_gr_chsw_load,
.fifo.cclass = nvkm_gr_cclass_new,
.fifo.sclass = nvkm_gr_oclass_get,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
index ce913300539f..da1ba74682b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -25,6 +25,8 @@
#include <subdev/timer.h>
+#include <nvif/class.h>
+
static const struct nvkm_bitfield nv50_gr_status[] = {
{ 0x00000001, "BUSY" }, /* set when any bit is set */
{ 0x00000002, "DISPATCH" },
@@ -180,11 +182,11 @@ g84_gr = {
.tlb_flush = g84_gr_tlb_flush,
.units = nv50_gr_units,
.sclass = {
- { -1, -1, 0x0030, &nv50_gr_object },
- { -1, -1, 0x502d, &nv50_gr_object },
- { -1, -1, 0x5039, &nv50_gr_object },
- { -1, -1, 0x50c0, &nv50_gr_object },
- { -1, -1, 0x8297, &nv50_gr_object },
+ { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+ { -1, -1, NV50_TWOD, &nv50_gr_object },
+ { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+ { -1, -1, NV50_COMPUTE, &nv50_gr_object },
+ { -1, -1, G82_TESLA, &nv50_gr_object },
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index f65a5b0a1a4d..f9acb8a944d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -702,6 +702,22 @@ gf100_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
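+/* Report whether a PGRAPH context switch is pending; the register holding
+ * this state differs between the internal and external ctxsw firmware.
+ */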
+static bool
+gf100_gr_chsw_load(struct nvkm_gr *base)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+ if (!gr->firmware) {
+ u32 trace = nvkm_rd32(gr->base.engine.subdev.device, 0x40981c);
+ if (trace & 0x00000040)
+ return true;
+ } else {
+ u32 mthd = nvkm_rd32(gr->base.engine.subdev.device, 0x409808);
+ if (mthd & 0x00080000)
+ return true;
+ }
+ return false;
+}
+
int
gf100_gr_rops(struct gf100_gr *gr)
{
@@ -1136,7 +1152,7 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
if (trap & 0x00000008) {
u32 stat = nvkm_rd32(device, 0x408030);
- nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
+ nvkm_snprintbf(error, sizeof(error), gf100_ccache_error,
stat & 0x3fffffff);
nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error);
nvkm_wr32(device, 0x408030, 0xc0000000);
@@ -1391,26 +1407,11 @@ gf100_gr_intr(struct nvkm_gr *base)
}
static void
-gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
+gf100_gr_init_fw(struct nvkm_falcon *falcon,
struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
- struct nvkm_device *device = gr->base.engine.subdev.device;
- int i;
-
- nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
- for (i = 0; i < data->size / 4; i++)
- nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);
-
- nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
- for (i = 0; i < code->size / 4; i++) {
- if ((i & 0x3f) == 0)
- nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
- nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
- }
-
- /* code must be padded to 0x40 words */
- for (; i & 0x3f; i++)
- nvkm_wr32(device, fuc_base + 0x0184, 0);
+ nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
+ nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
}
static void
@@ -1455,162 +1456,149 @@ gf100_gr_init_csdata(struct gf100_gr *gr,
nvkm_wr32(device, falcon + 0x01c4, star + 4);
}
-int
-gf100_gr_init_ctxctl(struct gf100_gr *gr)
+/* Initialize context from an external (secure or not) firmware */
+static int
+gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
{
- const struct gf100_grctx_func *grctx = gr->func->grctx;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
- int i;
int ret = 0;
- if (gr->firmware) {
- /* load fuc microcode */
- nvkm_mc_unk260(device, 0);
-
- /* securely-managed falcons must be reset using secure boot */
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
- else
- gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
- &gr->fuc409d);
- if (ret)
- return ret;
+ /* load fuc microcode */
+ nvkm_mc_unk260(device, 0);
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
- else
- gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
- &gr->fuc41ad);
- if (ret)
- return ret;
+ /* securely-managed falcons must be reset using secure boot */
+ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
+ ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+ else
+ gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
+ if (ret)
+ return ret;
- nvkm_mc_unk260(device, 1);
-
- /* start both of them running */
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x41a10c, 0x00000000);
- nvkm_wr32(device, 0x40910c, 0x00000000);
-
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_GPCCS);
- else
- nvkm_wr32(device, 0x41a100, 0x00000002);
- if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_FECS);
- else
- nvkm_wr32(device, 0x409100, 0x00000002);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800) & 0x00000001)
- break;
- ) < 0)
- return -EBUSY;
+ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
+ ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+ else
+ gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
+ if (ret)
+ return ret;
+
+ nvkm_mc_unk260(device, 1);
+
+ /* start both of them running */
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x41a10c, 0x00000000);
+ nvkm_wr32(device, 0x40910c, 0x00000000);
+
+ nvkm_falcon_start(gr->gpccs);
+ nvkm_falcon_start(gr->fecs);
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x409500, 0x7fffffff);
- nvkm_wr32(device, 0x409504, 0x00000021);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800) & 0x00000001)
+ break;
+ ) < 0)
+ return -EBUSY;
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x7fffffff);
+ nvkm_wr32(device, 0x409504, 0x00000021);
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000010);
+ if (nvkm_msec(device, 2000,
+ if ((gr->size = nvkm_rd32(device, 0x409800)))
+ break;
+ ) < 0)
+ return -EBUSY;
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000016);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800))
+ break;
+ ) < 0)
+ return -EBUSY;
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000025);
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800))
+ break;
+ ) < 0)
+ return -EBUSY;
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x409500, 0x00000000);
- nvkm_wr32(device, 0x409504, 0x00000010);
+ if (device->chipset >= 0xe0) {
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000030);
if (nvkm_msec(device, 2000,
- if ((gr->size = nvkm_rd32(device, 0x409800)))
+ if (nvkm_rd32(device, 0x409800))
break;
) < 0)
return -EBUSY;
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x409500, 0x00000000);
- nvkm_wr32(device, 0x409504, 0x00000016);
+ nvkm_wr32(device, 0x409810, 0xb00095c8);
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000031);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800))
break;
) < 0)
return -EBUSY;
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x409500, 0x00000000);
- nvkm_wr32(device, 0x409504, 0x00000025);
+ nvkm_wr32(device, 0x409810, 0x00080420);
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000032);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800))
break;
) < 0)
return -EBUSY;
- if (device->chipset >= 0xe0) {
- nvkm_wr32(device, 0x409800, 0x00000000);
- nvkm_wr32(device, 0x409500, 0x00000001);
- nvkm_wr32(device, 0x409504, 0x00000030);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800))
- break;
- ) < 0)
- return -EBUSY;
-
- nvkm_wr32(device, 0x409810, 0xb00095c8);
- nvkm_wr32(device, 0x409800, 0x00000000);
- nvkm_wr32(device, 0x409500, 0x00000001);
- nvkm_wr32(device, 0x409504, 0x00000031);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800))
- break;
- ) < 0)
- return -EBUSY;
-
- nvkm_wr32(device, 0x409810, 0x00080420);
- nvkm_wr32(device, 0x409800, 0x00000000);
- nvkm_wr32(device, 0x409500, 0x00000001);
- nvkm_wr32(device, 0x409504, 0x00000032);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800))
- break;
- ) < 0)
- return -EBUSY;
+ nvkm_wr32(device, 0x409614, 0x00000070);
+ nvkm_wr32(device, 0x409614, 0x00000770);
+ nvkm_wr32(device, 0x40802c, 0x00000001);
+ }
- nvkm_wr32(device, 0x409614, 0x00000070);
- nvkm_wr32(device, 0x409614, 0x00000770);
- nvkm_wr32(device, 0x40802c, 0x00000001);
+ if (gr->data == NULL) {
+ int ret = gf100_grctx_generate(gr);
+ if (ret) {
+ nvkm_error(subdev, "failed to construct context\n");
+ return ret;
}
+ }
- if (gr->data == NULL) {
- int ret = gf100_grctx_generate(gr);
- if (ret) {
- nvkm_error(subdev, "failed to construct context\n");
- return ret;
- }
- }
+ return 0;
+}
+
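+/* Initialize context using the built-in ctxsw ucode (no external firmware). */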
+static int
+gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
+{
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
- return 0;
- } else
if (!gr->func->fecs.ucode) {
return -ENOSYS;
}
/* load HUB microcode */
nvkm_mc_unk260(device, 0);
- nvkm_wr32(device, 0x4091c0, 0x01000000);
- for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
- nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
-
- nvkm_wr32(device, 0x409180, 0x01000000);
- for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
- if ((i & 0x3f) == 0)
- nvkm_wr32(device, 0x409188, i >> 6);
- nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
- }
+ nvkm_falcon_load_dmem(gr->fecs, gr->func->fecs.ucode->data.data, 0x0,
+ gr->func->fecs.ucode->data.size, 0);
+ nvkm_falcon_load_imem(gr->fecs, gr->func->fecs.ucode->code.data, 0x0,
+ gr->func->fecs.ucode->code.size, 0, 0, false);
/* load GPC microcode */
- nvkm_wr32(device, 0x41a1c0, 0x01000000);
- for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
- nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);
-
- nvkm_wr32(device, 0x41a180, 0x01000000);
- for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
- if ((i & 0x3f) == 0)
- nvkm_wr32(device, 0x41a188, i >> 6);
- nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
- }
+ nvkm_falcon_load_dmem(gr->gpccs, gr->func->gpccs.ucode->data.data, 0x0,
+ gr->func->gpccs.ucode->data.size, 0);
+ nvkm_falcon_load_imem(gr->gpccs, gr->func->gpccs.ucode->code.data, 0x0,
+ gr->func->gpccs.ucode->code.size, 0, 0, false);
nvkm_mc_unk260(device, 1);
/* load register lists */
@@ -1642,6 +1630,19 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
return 0;
}
+int
+gf100_gr_init_ctxctl(struct gf100_gr *gr)
+{
+ int ret;
+
+ if (gr->firmware)
+ ret = gf100_gr_init_ctxctl_ext(gr);
+ else
+ ret = gf100_gr_init_ctxctl_int(gr);
+
+ return ret;
+}
+
static int
gf100_gr_oneinit(struct nvkm_gr *base)
{
@@ -1711,10 +1712,32 @@ static int
gf100_gr_init_(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
+ struct nvkm_subdev *subdev = &base->engine.subdev;
+ int ret;
+
nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
+
+ ret = nvkm_falcon_get(gr->fecs, subdev);
+ if (ret)
+ return ret;
+
+ ret = nvkm_falcon_get(gr->gpccs, subdev);
+ if (ret)
+ return ret;
+
return gr->func->init(gr);
}
+static int
+gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ nvkm_falcon_put(gr->gpccs, subdev);
+ nvkm_falcon_put(gr->fecs, subdev);
+ return 0;
+}
+
void
gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
{
@@ -1737,6 +1760,9 @@ gf100_gr_dtor(struct nvkm_gr *base)
gr->func->dtor(gr);
kfree(gr->data);
+ nvkm_falcon_del(&gr->gpccs);
+ nvkm_falcon_del(&gr->fecs);
+
gf100_gr_dtor_fw(&gr->fuc409c);
gf100_gr_dtor_fw(&gr->fuc409d);
gf100_gr_dtor_fw(&gr->fuc41ac);
@@ -1755,10 +1781,12 @@ gf100_gr_ = {
.dtor = gf100_gr_dtor,
.oneinit = gf100_gr_oneinit,
.init = gf100_gr_init_,
+ .fini = gf100_gr_fini_,
.intr = gf100_gr_intr,
.units = gf100_gr_units,
.chan_new = gf100_gr_chan_new,
.object_get = gf100_gr_object_get,
+ .chsw_load = gf100_gr_chsw_load,
};
int
@@ -1828,6 +1856,7 @@ int
gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
int index, struct gf100_gr *gr)
{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
int ret;
gr->func = func;
@@ -1840,7 +1869,11 @@ gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
if (ret)
return ret;
- return 0;
+ ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
+ if (ret)
+ return ret;
+
+ return nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 268b8d60ff73..db6ee3b06841 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -29,6 +29,7 @@
#include <core/gpuobj.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>
+#include <engine/falcon.h>
#define GPC_MAX 32
#define TPC_MAX_PER_GPC 8
@@ -75,6 +76,8 @@ struct gf100_gr {
const struct gf100_gr_func *func;
struct nvkm_gr base;
+ struct nvkm_falcon *fecs;
+ struct nvkm_falcon *gpccs;
struct gf100_gr_fuc fuc409c;
struct gf100_gr_fuc fuc409d;
struct gf100_gr_fuc fuc41ac;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
index 2e68919f00b2..c711a55ce392 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -23,6 +23,8 @@
*/
#include "nv50.h"
+#include <nvif/class.h>
+
static const struct nvkm_gr_func
gt200_gr = {
.init = nv50_gr_init,
@@ -31,11 +33,11 @@ gt200_gr = {
.tlb_flush = g84_gr_tlb_flush,
.units = nv50_gr_units,
.sclass = {
- { -1, -1, 0x0030, &nv50_gr_object },
- { -1, -1, 0x502d, &nv50_gr_object },
- { -1, -1, 0x5039, &nv50_gr_object },
- { -1, -1, 0x50c0, &nv50_gr_object },
- { -1, -1, 0x8397, &nv50_gr_object },
+ { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+ { -1, -1, NV50_TWOD, &nv50_gr_object },
+ { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+ { -1, -1, NV50_COMPUTE, &nv50_gr_object },
+ { -1, -1, GT200_TESLA, &nv50_gr_object },
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
index 2bf7aac360cc..fa103df32ec7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -23,6 +23,8 @@
*/
#include "nv50.h"
+#include <nvif/class.h>
+
static const struct nvkm_gr_func
gt215_gr = {
.init = nv50_gr_init,
@@ -31,12 +33,12 @@ gt215_gr = {
.tlb_flush = g84_gr_tlb_flush,
.units = nv50_gr_units,
.sclass = {
- { -1, -1, 0x0030, &nv50_gr_object },
- { -1, -1, 0x502d, &nv50_gr_object },
- { -1, -1, 0x5039, &nv50_gr_object },
- { -1, -1, 0x50c0, &nv50_gr_object },
- { -1, -1, 0x8597, &nv50_gr_object },
- { -1, -1, 0x85c0, &nv50_gr_object },
+ { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+ { -1, -1, NV50_TWOD, &nv50_gr_object },
+ { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+ { -1, -1, NV50_COMPUTE, &nv50_gr_object },
+ { -1, -1, GT214_TESLA, &nv50_gr_object },
+ { -1, -1, GT214_COMPUTE, &nv50_gr_object },
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
index 95d5219faf93..eb1a90644752 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -23,6 +23,8 @@
*/
#include "nv50.h"
+#include <nvif/class.h>
+
static const struct nvkm_gr_func
mcp79_gr = {
.init = nv50_gr_init,
@@ -30,11 +32,11 @@ mcp79_gr = {
.chan_new = nv50_gr_chan_new,
.units = nv50_gr_units,
.sclass = {
- { -1, -1, 0x0030, &nv50_gr_object },
- { -1, -1, 0x502d, &nv50_gr_object },
- { -1, -1, 0x5039, &nv50_gr_object },
- { -1, -1, 0x50c0, &nv50_gr_object },
- { -1, -1, 0x8397, &nv50_gr_object },
+ { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+ { -1, -1, NV50_TWOD, &nv50_gr_object },
+ { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+ { -1, -1, NV50_COMPUTE, &nv50_gr_object },
+ { -1, -1, GT200_TESLA, &nv50_gr_object },
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
index 027b58e5976b..c91eb56e9327 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -23,6 +23,8 @@
*/
#include "nv50.h"
+#include <nvif/class.h>
+
static const struct nvkm_gr_func
mcp89_gr = {
.init = nv50_gr_init,
@@ -31,12 +33,12 @@ mcp89_gr = {
.tlb_flush = g84_gr_tlb_flush,
.units = nv50_gr_units,
.sclass = {
- { -1, -1, 0x0030, &nv50_gr_object },
- { -1, -1, 0x502d, &nv50_gr_object },
- { -1, -1, 0x5039, &nv50_gr_object },
- { -1, -1, 0x50c0, &nv50_gr_object },
- { -1, -1, 0x85c0, &nv50_gr_object },
- { -1, -1, 0x8697, &nv50_gr_object },
+ { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+ { -1, -1, NV50_TWOD, &nv50_gr_object },
+ { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+ { -1, -1, NV50_COMPUTE, &nv50_gr_object },
+ { -1, -1, GT214_COMPUTE, &nv50_gr_object },
+ { -1, -1, GT21A_TESLA, &nv50_gr_object },
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index fca67de43f2b..df16ffda1749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -27,6 +27,8 @@
#include <core/gpuobj.h>
#include <engine/fifo.h>
+#include <nvif/class.h>
+
u64
nv50_gr_units(struct nvkm_gr *gr)
{
@@ -778,11 +780,11 @@ nv50_gr = {
.chan_new = nv50_gr_chan_new,
.units = nv50_gr_units,
.sclass = {
- { -1, -1, 0x0030, &nv50_gr_object },
- { -1, -1, 0x502d, &nv50_gr_object },
- { -1, -1, 0x5039, &nv50_gr_object },
- { -1, -1, 0x5097, &nv50_gr_object },
- { -1, -1, 0x50c0, &nv50_gr_object },
+ { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
+ { -1, -1, NV50_TWOD, &nv50_gr_object },
+ { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
+ { -1, -1, NV50_TESLA, &nv50_gr_object },
+ { -1, -1, NV50_COMPUTE, &nv50_gr_object },
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index d8adcdf6985a..2a52d9f026ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -15,6 +15,7 @@ struct nvkm_gr_func {
void *(*dtor)(struct nvkm_gr *);
int (*oneinit)(struct nvkm_gr *);
int (*init)(struct nvkm_gr *);
+ int (*fini)(struct nvkm_gr *, bool);
void (*intr)(struct nvkm_gr *);
void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
int (*tlb_flush)(struct nvkm_gr *);
@@ -24,6 +25,7 @@ struct nvkm_gr_func {
/* Returns chipset-specific counts of units packed into a u64.
*/
u64 (*units)(struct nvkm_gr *);
+ bool (*chsw_load)(struct nvkm_gr *);
struct nvkm_sclass sclass[];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
new file mode 100644
index 000000000000..584863db9bfc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -0,0 +1,2 @@
+nvkm-y += nvkm/falcon/base.o
+nvkm-y += nvkm/falcon/v1.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
new file mode 100644
index 000000000000..4852f313762f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mc.h>
+
+void
+nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u16 tag, u8 port, bool secure)
+{
+ if (secure && !falcon->secret) {
+ nvkm_warn(falcon->user,
+ "writing with secure tag on a non-secure falcon!\n");
+ return;
+ }
+
+ falcon->func->load_imem(falcon, data, start, size, tag, port,
+ secure);
+}
+
+void
+nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u8 port)
+{
+ falcon->func->load_dmem(falcon, data, start, size, port);
+}
+
+void
+nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
+ void *data)
+{
+ falcon->func->read_dmem(falcon, start, size, port, data);
+}
+
+void
+nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
+{
+ if (!falcon->func->bind_context) {
+ nvkm_error(falcon->user,
+ "Context binding not supported on this falcon!\n");
+ return;
+ }
+
+ falcon->func->bind_context(falcon, inst);
+}
+
+void
+nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
+{
+ falcon->func->set_start_addr(falcon, start_addr);
+}
+
+void
+nvkm_falcon_start(struct nvkm_falcon *falcon)
+{
+ falcon->func->start(falcon);
+}
+
+int
+nvkm_falcon_enable(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ enum nvkm_devidx id = falcon->owner->index;
+ int ret;
+
+ nvkm_mc_enable(device, id);
+ ret = falcon->func->enable(falcon);
+ if (ret) {
+ nvkm_mc_disable(device, id);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nvkm_falcon_disable(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ enum nvkm_devidx id = falcon->owner->index;
+
+ /* already disabled; return now, or wait_idle will time out */
+ if (!nvkm_mc_enabled(device, id))
+ return;
+
+ falcon->func->disable(falcon);
+
+ nvkm_mc_disable(device, id);
+}
+
+int
+nvkm_falcon_reset(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_disable(falcon);
+ return nvkm_falcon_enable(falcon);
+}
+
+int
+nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
+{
+ return falcon->func->wait_for_halt(falcon, ms);
+}
+
+int
+nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
+{
+ return falcon->func->clear_interrupt(falcon, mask);
+}
+
+void
+nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
+{
+ mutex_lock(&falcon->mutex);
+ if (falcon->user == user) {
+ nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
+ falcon->user = NULL;
+ }
+ mutex_unlock(&falcon->mutex);
+}
+
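+/* Acquire exclusive use of a falcon for the given subdev; fails with
+ * -EBUSY if another user already holds it. Released by nvkm_falcon_put().
+ */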
+int
+nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
+{
+ mutex_lock(&falcon->mutex);
+ if (falcon->user) {
+ nvkm_error(user, "%s falcon already acquired by %s!\n",
+ falcon->name, nvkm_subdev_name[falcon->user->index]);
+ mutex_unlock(&falcon->mutex);
+ return -EBUSY;
+ }
+
+ nvkm_debug(user, "acquired %s falcon\n", falcon->name);
+ falcon->user = user;
+ mutex_unlock(&falcon->mutex);
+ return 0;
+}
+
+void
+nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
+ struct nvkm_subdev *subdev, const char *name, u32 addr,
+ struct nvkm_falcon *falcon)
+{
+ u32 reg;
+
+ falcon->func = func;
+ falcon->owner = subdev;
+ falcon->name = name;
+ falcon->addr = addr;
+ mutex_init(&falcon->mutex);
+
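+ /* Decode the falcon hardware configuration: version, secret (secure)
+ * level, IMEM/DMEM port counts, memory limits, and debug mode.
+ */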
+ reg = nvkm_falcon_rd32(falcon, 0x12c);
+ falcon->version = reg & 0xf;
+ falcon->secret = (reg >> 4) & 0x3;
+ falcon->code.ports = (reg >> 8) & 0xf;
+ falcon->data.ports = (reg >> 12) & 0xf;
+
+ reg = nvkm_falcon_rd32(falcon, 0x108);
+ falcon->code.limit = (reg & 0x1ff) << 8;
+ falcon->data.limit = (reg & 0x3fe00) >> 1;
+
+ reg = nvkm_falcon_rd32(falcon, 0xc08);
+ falcon->debug = (reg >> 20) & 0x1;
+}
+
+void
+nvkm_falcon_del(struct nvkm_falcon **pfalcon)
+{
+ if (*pfalcon) {
+ kfree(*pfalcon);
+ *pfalcon = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
new file mode 100644
index 000000000000..97b56f759d0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -0,0 +1,8 @@
+#ifndef __NVKM_FALCON_PRIV_H__
+#define __NVKM_FALCON_PRIV_H__
+#include <engine/falcon.h>
+
+void
+nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *,
+ const char *, u32, struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
new file mode 100644
index 000000000000..b537f111f39c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/gpuobj.h>
+#include <core/memory.h>
+#include <subdev/timer.h>
+
+static void
+nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u16 tag, u8 port, bool secure)
+{
+ u8 rem = size % 4;
+ u32 reg;
+ int i;
+
+ size -= rem;
+
+ reg = start | BIT(24) | (secure ? BIT(28) : 0);
+ nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
+ for (i = 0; i < size / 4; i++) {
+ /* write new tag every 256B */
+ if ((i & 0x3f) == 0)
+ nvkm_falcon_wr32(falcon, 0x188, tag++);
+ nvkm_falcon_wr32(falcon, 0x184, ((u32 *)data)[i]);
+ }
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
+ */
+ if (rem) {
+ u32 extra = ((u32 *)data)[i];
+
+ /* write new tag every 256B */
+ if ((i & 0x3f) == 0)
+ nvkm_falcon_wr32(falcon, 0x188, tag++);
+ nvkm_falcon_wr32(falcon, 0x184, extra & (BIT(rem * 8) - 1));
+ ++i;
+ }
+
+ /* code must be padded to 0x40 words */
+ for (; i & 0x3f; i++)
+ nvkm_falcon_wr32(falcon, 0x184, 0);
+}
+
+static void
+nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u8 port)
+{
+ u8 rem = size % 4;
+ int i;
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 24));
+ for (i = 0; i < size / 4; i++)
+ nvkm_falcon_wr32(falcon, 0x1c4, ((u32 *)data)[i]);
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
+ */
+ if (rem) {
+ u32 extra = ((u32 *)data)[i];
+
+ nvkm_falcon_wr32(falcon, 0x1c4, extra & (BIT(rem * 8) - 1));
+ }
+}
+
+static void
+nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
+ u8 port, void *data)
+{
+ u8 rem = size % 4;
+ int i;
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 25));
+ for (i = 0; i < size / 4; i++)
+ ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4);
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get read
+ */
+ if (rem) {
+ u32 extra = nvkm_falcon_rd32(falcon, 0x1c4);
+
+ for (i = size; i < size + rem; i++) {
+ ((u8 *)data)[i] = (u8)(extra & 0xff);
+ extra >>= 8;
+ }
+ }
+}
+
+static void
+nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
+{
+ u32 inst_loc;
+
+ /* disable instance block binding */
+ if (ctx == NULL) {
+ nvkm_falcon_wr32(falcon, 0x10c, 0x0);
+ return;
+ }
+
+ nvkm_falcon_wr32(falcon, 0x10c, 0x1);
+
+ /* setup apertures - virtual */
+ nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_UCODE, 0x4);
+ nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_VIRT, 0x0);
+ /* setup apertures - physical */
+ nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
+ nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
+ nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
+
+ /* Set context */
+ switch (nvkm_memory_target(ctx->memory)) {
+ case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
+ case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ /* Enable context */
+ nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
+ nvkm_falcon_wr32(falcon, 0x480,
+ ((ctx->addr >> 12) & 0xfffffff) |
+ (inst_loc << 28) | (1 << 30));
+}
+
+static void
+nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
+{
+ nvkm_falcon_wr32(falcon, 0x104, start_addr);
+}
+
+static void
+nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
+{
+ u32 reg = nvkm_falcon_rd32(falcon, 0x100);
+
+ if (reg & BIT(6))
+ nvkm_falcon_wr32(falcon, 0x130, 0x2);
+ else
+ nvkm_falcon_wr32(falcon, 0x100, 0x2);
+}
+
+static int
+nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ /* clear interrupt(s) */
+ nvkm_falcon_mask(falcon, 0x004, mask, mask);
+ /* wait until interrupts are cleared */
+ ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+falcon_v1_wait_idle(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
+{
+ struct nvkm_device *device = falcon->owner->device;
+ int ret;
+
+ ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
+ if (ret < 0) {
+ nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
+ return ret;
+ }
+
+ ret = falcon_v1_wait_idle(falcon);
+ if (ret)
+ return ret;
+
+ /* enable IRQs */
+ nvkm_falcon_wr32(falcon, 0x010, 0xff);
+
+ return 0;
+}
+
+static void
+nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
+{
+ /* disable IRQs and wait for any previous code to complete */
+ nvkm_falcon_wr32(falcon, 0x014, 0xff);
+ falcon_v1_wait_idle(falcon);
+}
+
+static const struct nvkm_falcon_func
+nvkm_falcon_v1 = {
+ .load_imem = nvkm_falcon_v1_load_imem,
+ .load_dmem = nvkm_falcon_v1_load_dmem,
+ .read_dmem = nvkm_falcon_v1_read_dmem,
+ .bind_context = nvkm_falcon_v1_bind_context,
+ .start = nvkm_falcon_v1_start,
+ .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+ .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+ .enable = nvkm_falcon_v1_enable,
+ .disable = nvkm_falcon_v1_disable,
+ .set_start_addr = nvkm_falcon_v1_set_start_addr,
+};
+
+int
+nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
+ struct nvkm_falcon **pfalcon)
+{
+ struct nvkm_falcon *falcon;
+ if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index be57220a2e01..6b4f1e06a38f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -19,6 +19,7 @@ nvkm-y += nvkm/subdev/bios/pcir.o
nvkm-y += nvkm/subdev/bios/perf.o
nvkm-y += nvkm/subdev/bios/pll.o
nvkm-y += nvkm/subdev/bios/pmu.o
+nvkm-y += nvkm/subdev/bios/power_budget.o
nvkm-y += nvkm/subdev/bios/ramcfg.o
nvkm-y += nvkm/subdev/bios/rammap.o
nvkm-y += nvkm/subdev/bios/shadow.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c
new file mode 100644
index 000000000000..617bfffce4ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/power_budget.h>
+
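+/* Locate the power budget table via the BIT 'P' (version 2) entry at
+ * offset 0x2c; table versions 2.0 and 3.0 are recognized.
+ */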
+static u32
+nvbios_power_budget_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
+ u8 *len)
+{
+ struct bit_entry bit_P;
+ u32 power_budget;
+
+ if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
+ bit_P.length < 0x2c)
+ return 0;
+
+ power_budget = nvbios_rd32(bios, bit_P.offset + 0x2c);
+ if (!power_budget)
+ return 0;
+
+ *ver = nvbios_rd08(bios, power_budget);
+ switch (*ver) {
+ case 0x20:
+ case 0x30:
+ *hdr = nvbios_rd08(bios, power_budget + 0x1);
+ *len = nvbios_rd08(bios, power_budget + 0x2);
+ *cnt = nvbios_rd08(bios, power_budget + 0x3);
+ return power_budget;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int
+nvbios_power_budget_header(struct nvkm_bios *bios,
+ struct nvbios_power_budget *budget)
+{
+ struct nvkm_subdev *subdev = &bios->subdev;
+ u8 ver, hdr, cnt, len, cap_entry;
+ u32 header;
+
+ if (!bios || !budget)
+ return -EINVAL;
+
+ header = nvbios_power_budget_table(bios, &ver, &hdr, &cnt, &len);
+ if (!header || !cnt)
+ return -ENODEV;
+
+ switch (ver) {
+ case 0x20:
+ cap_entry = nvbios_rd08(bios, header + 0x9);
+ break;
+ case 0x30:
+ cap_entry = nvbios_rd08(bios, header + 0xa);
+ break;
+ default:
+ cap_entry = 0xff;
+ }
+
+ if (cap_entry >= cnt && cap_entry != 0xff) {
+ nvkm_warn(subdev,
+ "invalid cap_entry in power budget table found\n");
+ budget->cap_entry = 0xff;
+ return -EINVAL;
+ }
+
+ budget->offset = header;
+ budget->ver = ver;
+ budget->hlen = hdr;
+ budget->elen = len;
+ budget->ecount = cnt;
+
+ budget->cap_entry = cap_entry;
+
+ return 0;
+}
+
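+/* Read a single budget entry; 2.0+ layouts carry min/avg/max wattage,
+ * while older layouts provide only one value, used as both avg and max.
+ */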
+int
+nvbios_power_budget_entry(struct nvkm_bios *bios,
+ struct nvbios_power_budget *budget,
+ u8 idx, struct nvbios_power_budget_entry *entry)
+{
+ u32 entry_offset;
+
+ if (!bios || !budget || !budget->offset || idx >= budget->ecount
+ || !entry)
+ return -EINVAL;
+
+ entry_offset = budget->offset + budget->hlen + idx * budget->elen;
+
+ if (budget->ver >= 0x20) {
+ entry->min_w = nvbios_rd32(bios, entry_offset + 0x2);
+ entry->avg_w = nvbios_rd32(bios, entry_offset + 0x6);
+ entry->max_w = nvbios_rd32(bios, entry_offset + 0xa);
+ } else {
+ entry->min_w = 0;
+ entry->max_w = nvbios_rd32(bios, entry_offset + 0x2);
+ entry->avg_w = entry->max_w;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 5841f297973c..da1770e47490 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -112,7 +112,7 @@ read_pll_src(struct nv50_clk *clk, u32 base)
M = (coef & 0x000000ff) >> 0;
break;
default:
- BUG_ON(1);
+ BUG();
}
if (M)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index c714b097719c..59362f8dee22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -50,7 +50,7 @@ nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
if (!ret) {
nvkm_error(subdev, "failed pll calculation\n");
- return ret;
+ return -EINVAL;
}
switch (info.type) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 093223d1df4f..6758da93a3a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -445,7 +445,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
{
struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
struct nvkm_mm *mm = &ram->vram;
- struct nvkm_mm_node *r;
+ struct nvkm_mm_node **node, *r;
struct nvkm_mem *mem;
int type = (memtype & 0x0ff);
int back = (memtype & 0x800);
@@ -462,7 +462,6 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&mem->regions);
mem->size = size;
mutex_lock(&ram->fb->subdev.mutex);
@@ -478,6 +477,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
}
mem->memtype = type;
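+ /* Chain allocated regions through each node's next pointer. */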
+ node = &mem->mem;
do {
if (back)
ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
@@ -489,13 +489,13 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
return ret;
}
- list_add_tail(&r->rl_entry, &mem->regions);
+ *node = r;
+ node = &r->next;
size -= r->length;
} while (size);
mutex_unlock(&ram->fb->subdev.mutex);
- r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
- mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+ mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
*pmem = mem;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 7904fa41acef..fb8a1239743d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -989,7 +989,7 @@ gk104_pll_calc_hiclk(int target_khz, int crystal,
int *N1, int *fN1, int *M1, int *P1,
int *N2, int *M2, int *P2)
{
- int best_clk = 0, best_err = target_khz, p_ref, n_ref;
+ int best_err = target_khz, p_ref, n_ref;
bool upper = false;
*M1 = 1;
@@ -1010,7 +1010,6 @@ gk104_pll_calc_hiclk(int target_khz, int crystal,
/* we found a better combination */
if (cur_err < best_err) {
best_err = cur_err;
- best_clk = cur_clk;
*N2 = cur_N;
*N1 = n_ref;
*P1 = p_ref;
@@ -1022,7 +1021,6 @@ gk104_pll_calc_hiclk(int target_khz, int crystal,
- target_khz;
if (cur_err < best_err) {
best_err = cur_err;
- best_clk = cur_clk;
*N2 = cur_N;
*N1 = n_ref;
*P1 = p_ref;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index 0a0e44b75577..017a91de74a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -39,7 +39,7 @@ mcp77_ram_init(struct nvkm_ram *base)
u32 flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
/* Enable NISO poller for various clients and set their associated
- * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+ * read address, only for MCP77/78 and MCP79/7A. (fd#27501)
*/
nvkm_wr32(device, 0x100c18, dniso);
nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index 87bde8ff2d6b..6549b0588309 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -496,15 +496,12 @@ nv50_ram_tidy(struct nvkm_ram *base)
void
__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
{
- struct nvkm_mm_node *this;
-
- while (!list_empty(&mem->regions)) {
- this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
-
- list_del(&this->rl_entry);
- nvkm_mm_free(&ram->vram, &this);
+ struct nvkm_mm_node *next = mem->mem;
+ struct nvkm_mm_node *node;
+ while ((node = next)) {
+ next = node->next;
+ nvkm_mm_free(&ram->vram, &node);
}
-
nvkm_mm_free(&ram->tags, &mem->tag);
}
@@ -530,7 +527,7 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
{
struct nvkm_mm *heap = &ram->vram;
struct nvkm_mm *tags = &ram->tags;
- struct nvkm_mm_node *r;
+ struct nvkm_mm_node **node, *r;
struct nvkm_mem *mem;
int comp = (memtype & 0x300) >> 8;
int type = (memtype & 0x07f);
@@ -559,11 +556,11 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
comp = 0;
}
- INIT_LIST_HEAD(&mem->regions);
mem->memtype = (comp << 7) | type;
mem->size = max;
type = nv50_fb_memtype[type];
+ node = &mem->mem;
do {
if (back)
ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
@@ -575,13 +572,13 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
return ret;
}
- list_add_tail(&r->rl_entry, &mem->regions);
+ *node = r;
+ node = &r->next;
max -= r->length;
} while (max);
mutex_unlock(&ram->fb->subdev.mutex);
- r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
- mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+ mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
*pmem = mem;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index f0af2a381eea..fecfa6afcf54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/extdev.h>
#include <subdev/bios/iccsense.h>
+#include <subdev/bios/power_budget.h>
#include <subdev/i2c.h>
static bool
@@ -216,10 +217,25 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
struct nvkm_bios *bios = subdev->device->bios;
+ struct nvbios_power_budget budget;
struct nvbios_iccsense stbl;
- int i;
+ int i, ret;
- if (!bios || nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry)
+ if (!bios)
+ return 0;
+
+ ret = nvbios_power_budget_header(bios, &budget);
+ if (!ret && budget.cap_entry != 0xff) {
+ struct nvbios_power_budget_entry entry;
+ ret = nvbios_power_budget_entry(bios, &budget,
+ budget.cap_entry, &entry);
+ if (!ret) {
+ iccsense->power_w_max = entry.avg_w;
+ iccsense->power_w_crit = entry.max_w;
+ }
+ }
+
+ if (nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry)
return 0;
iccsense->data_valid = true;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index a6a7fa0d7679..9dec58ec3d9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -116,7 +116,7 @@ struct gk20a_instmem {
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
- return NVKM_MEM_TARGET_HOST;
+ return NVKM_MEM_TARGET_NCOH;
}
static u64
@@ -305,11 +305,11 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
- struct nvkm_mm_node *r;
+ struct nvkm_mm_node *r = node->base.mem.mem;
unsigned long flags;
int i;
- if (unlikely(list_empty(&node->base.mem.regions)))
+ if (unlikely(!r))
goto out;
spin_lock_irqsave(&imem->lock, flags);
@@ -320,9 +320,6 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
spin_unlock_irqrestore(&imem->lock, flags);
- r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
- rl_entry);
-
/* clear IOMMU bit to unmap pages */
r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
@@ -404,10 +401,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
node->r.length = (npages << PAGE_SHIFT) >> 12;
node->base.mem.offset = node->handle;
-
- INIT_LIST_HEAD(&node->base.mem.regions);
- list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
-
+ node->base.mem.mem = &node->r;
return 0;
}
@@ -484,10 +478,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-
- INIT_LIST_HEAD(&node->base.mem.regions);
- list_add_tail(&r->rl_entry, &node->base.mem.regions);
-
+ node->base.mem.mem = r;
return 0;
release_area:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 6b25e25f9eba..09f669ac6630 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -161,6 +161,16 @@ nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
}
}
+bool
+nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+
+ return (pmc_enable != 0) &&
+ ((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable);
+}
+
static int
nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
{
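
[Editorial aside] The new nvkm_mc_enabled() helper reports a device as enabled only when every bit of its reset mask is set in PMC_ENABLE (register 0x000200). A minimal sketch of that test, with the register value passed in instead of the nvkm_rd32() call:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: 'pmc_enable_reg' stands in for nvkm_rd32(device, 0x000200). */
static bool mc_enabled(uint32_t pmc_enable_reg, uint64_t mask)
{
	/* an empty mask means the device has no reset bits, so report false */
	return mask != 0 && (pmc_enable_reg & mask) == mask;
}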
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 5df9669ea39c..d06ad2c372bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -31,7 +31,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
struct nvkm_vm *vm = vma->vm;
struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_mm_node *r;
+ struct nvkm_mm_node *r = node->mem;
int big = vma->node->type != mmu->func->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
@@ -41,7 +41,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
u32 end, len;
delta = 0;
- list_for_each_entry(r, &node->regions, rl_entry) {
+ while (r) {
u64 phys = (u64)r->offset << 12;
u32 num = r->length >> bits;
@@ -65,7 +65,8 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
delta += (u64)len << vma->node->type;
}
- }
+ r = r->next;
+ };
mmu->func->flush(vm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 2a31b7d66a6d..87bf41cef0c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -6,6 +6,7 @@ nvkm-y += nvkm/subdev/pci/nv40.o
nvkm-y += nvkm/subdev/pci/nv46.o
nvkm-y += nvkm/subdev/pci/nv4c.o
nvkm-y += nvkm/subdev/pci/g84.o
+nvkm-y += nvkm/subdev/pci/g92.o
nvkm-y += nvkm/subdev/pci/g94.o
nvkm-y += nvkm/subdev/pci/gf100.o
nvkm-y += nvkm/subdev/pci/gf106.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
new file mode 100644
index 000000000000..48874359d5f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+g92_pcie_version_supported(struct nvkm_pci *pci)
+{
+ if ((nvkm_pci_rd32(pci, 0x460) & 0x200) == 0x200)
+ return 2;
+ return 1;
+}
+
+static const struct nvkm_pci_func
+g92_pci_func = {
+ .init = g84_pci_init,
+ .rd32 = nv40_pci_rd32,
+ .wr08 = nv40_pci_wr08,
+ .wr32 = nv40_pci_wr32,
+ .msi_rearm = nv46_pci_msi_rearm,
+
+ .pcie.init = g84_pcie_init,
+ .pcie.set_link = g84_pcie_set_link,
+
+ .pcie.max_speed = g84_pcie_max_speed,
+ .pcie.cur_speed = g84_pcie_cur_speed,
+
+ .pcie.set_version = g84_pcie_set_version,
+ .pcie.version = g84_pcie_version,
+ .pcie.version_supported = g92_pcie_version_supported,
+};
+
+int
+g92_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&g92_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 43444123bc04..09adb37a5664 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -23,14 +23,6 @@
*/
#include "priv.h"
-int
-g94_pcie_version_supported(struct nvkm_pci *pci)
-{
- if ((nvkm_pci_rd32(pci, 0x460) & 0x200) == 0x200)
- return 2;
- return 1;
-}
-
static const struct nvkm_pci_func
g94_pci_func = {
.init = g84_pci_init,
@@ -47,7 +39,7 @@ g94_pci_func = {
.pcie.set_version = g84_pcie_set_version,
.pcie.version = g84_pcie_version,
- .pcie.version_supported = g94_pcie_version_supported,
+ .pcie.version_supported = g92_pcie_version_supported,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index e30ea676baf6..00a5e7d3ee9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -92,7 +92,7 @@ gf100_pci_func = {
.pcie.set_version = gf100_pcie_set_version,
.pcie.version = gf100_pcie_version,
- .pcie.version_supported = g94_pcie_version_supported,
+ .pcie.version_supported = g92_pcie_version_supported,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index c3b798c5c6dd..11bf419afe3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -39,7 +39,7 @@ gf106_pci_func = {
.pcie.set_version = gf100_pcie_set_version,
.pcie.version = gf100_pcie_version,
- .pcie.version_supported = g94_pcie_version_supported,
+ .pcie.version_supported = g92_pcie_version_supported,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 23de3180aae5..86921ec962d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -44,7 +44,7 @@ enum nvkm_pcie_speed g84_pcie_max_speed(struct nvkm_pci *);
int g84_pcie_init(struct nvkm_pci *);
int g84_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8);
-int g94_pcie_version_supported(struct nvkm_pci *);
+int g92_pcie_version_supported(struct nvkm_pci *);
void gf100_pcie_set_version(struct nvkm_pci *, u8);
int gf100_pcie_version(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 51fb4bf94a44..ca57c1e491b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -8,5 +8,6 @@ nvkm-y += nvkm/subdev/pmu/gk110.o
nvkm-y += nvkm/subdev/pmu/gk208.o
nvkm-y += nvkm/subdev/pmu/gk20a.o
nvkm-y += nvkm/subdev/pmu/gm107.o
+nvkm-y += nvkm/subdev/pmu/gm20b.o
nvkm-y += nvkm/subdev/pmu/gp100.o
nvkm-y += nvkm/subdev/pmu/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index e611ce80f8ef..a73f690eb4b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -116,6 +116,8 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ nvkm_falcon_del(&pmu->falcon);
return nvkm_pmu(subdev);
}
@@ -129,15 +131,22 @@ nvkm_pmu = {
};
int
+nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+ int index, struct nvkm_pmu *pmu)
+{
+ nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
+ pmu->func = func;
+ INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
+ init_waitqueue_head(&pmu->recv.wait);
+ return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+}
+
+int
nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
int index, struct nvkm_pmu **ppmu)
{
struct nvkm_pmu *pmu;
if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
- pmu->func = func;
- INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
- init_waitqueue_head(&pmu->recv.wait);
- return 0;
+ return nvkm_pmu_ctor(func, device, index, *ppmu);
}
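
[Editorial aside] The nvkm_pmu_ctor()/nvkm_pmu_new_() split lets a subclass such as gk20a_pmu embed struct nvkm_pmu and run the shared initialization on it, while the plain path still allocates the base object itself. A standalone sketch of this two-phase construction pattern; all names here are illustrative, not the nvkm API:

#include <stdlib.h>

struct pmu { const void *func; };
struct gk20a_pmu { struct pmu base; int dvfs_state; };

static int pmu_ctor(const void *func, struct pmu *pmu)
{
	pmu->func = func;                  /* shared init used by both paths */
	return 0;
}

static int pmu_new(const void *func, struct pmu **ppmu)
{
	if (!(*ppmu = calloc(1, sizeof(**ppmu))))
		return -1;
	return pmu_ctor(func, *ppmu);      /* simple allocate-and-construct */
}

static int gk20a_pmu_new(const void *func, struct pmu **ppmu)
{
	struct gk20a_pmu *pmu = calloc(1, sizeof(*pmu));

	if (!pmu)
		return -1;
	*ppmu = &pmu->base;
	return pmu_ctor(func, &pmu->base); /* subclass owns the allocation */
}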
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index f996d90c9f0d..9ca0db796cbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -19,7 +19,7 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev)
+#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base)
#include "priv.h"
#include <subdev/clk.h>
@@ -43,9 +43,8 @@ struct gk20a_pmu {
};
struct gk20a_pmu_dvfs_dev_status {
- unsigned long total;
- unsigned long busy;
- int cur_state;
+ u32 total;
+ u32 busy;
};
static int
@@ -56,13 +55,12 @@ gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
return nvkm_clk_astate(clk, *state, 0, false);
}
-static int
+static void
gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
{
struct nvkm_clk *clk = pmu->base.subdev.device->clk;
*state = clk->pstate;
- return 0;
}
static int
@@ -90,28 +88,26 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
*state = level;
- if (level == cur_level)
- return 0;
- else
- return 1;
+ return (level != cur_level);
}
-static int
+static void
gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
struct gk20a_pmu_dvfs_dev_status *status)
{
- struct nvkm_device *device = pmu->base.subdev.device;
- status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
- status->total= nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
- return 0;
+ struct nvkm_falcon *falcon = pmu->base.falcon;
+
+ status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
+ status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
}
static void
gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
{
- struct nvkm_device *device = pmu->base.subdev.device;
- nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
- nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
+ struct nvkm_falcon *falcon = pmu->base.falcon;
+
+ nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
+ nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
}
static void
@@ -127,7 +123,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
struct nvkm_timer *tmr = device->timer;
struct nvkm_volt *volt = device->volt;
u32 utilization = 0;
- int state, ret;
+ int state;
/*
* The PMU is initialized before CLK and VOLT, so we have to make sure the
@@ -136,11 +132,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
if (!clk || !volt)
goto resched;
- ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
- if (ret) {
- nvkm_warn(subdev, "failed to get device status\n");
- goto resched;
- }
+ gk20a_pmu_dvfs_get_dev_status(pmu, &status);
if (status.total)
utilization = div_u64((u64)status.busy * 100, status.total);
@@ -150,11 +142,7 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
utilization, data->avg_load);
- ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
- if (ret) {
- nvkm_warn(subdev, "failed to get current state\n");
- goto resched;
- }
+ gk20a_pmu_dvfs_get_cur_state(pmu, &state);
if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
nvkm_trace(subdev, "set new state to %d\n", state);
@@ -166,32 +154,36 @@ resched:
nvkm_timer_alarm(tmr, 100000000, alarm);
}
-static int
-gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
+static void
+gk20a_pmu_fini(struct nvkm_pmu *pmu)
{
- struct gk20a_pmu *pmu = gk20a_pmu(subdev);
- nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm);
- return 0;
-}
+ struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
+ nvkm_timer_alarm_cancel(pmu->subdev.device->timer, &gpmu->alarm);
-static void *
-gk20a_pmu_dtor(struct nvkm_subdev *subdev)
-{
- return gk20a_pmu(subdev);
+ nvkm_falcon_put(pmu->falcon, &pmu->subdev);
}
static int
-gk20a_pmu_init(struct nvkm_subdev *subdev)
+gk20a_pmu_init(struct nvkm_pmu *pmu)
{
- struct gk20a_pmu *pmu = gk20a_pmu(subdev);
- struct nvkm_device *device = pmu->base.subdev.device;
+ struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
+ struct nvkm_subdev *subdev = &pmu->subdev;
+ struct nvkm_device *device = pmu->subdev.device;
+ struct nvkm_falcon *falcon = pmu->falcon;
+ int ret;
+
+ ret = nvkm_falcon_get(falcon, subdev);
+ if (ret) {
+ nvkm_error(subdev, "cannot acquire %s falcon!\n", falcon->name);
+ return ret;
+ }
/* init pwr perf counter */
- nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
- nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
- nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
+ nvkm_falcon_wr32(falcon, 0x504 + (BUSY_SLOT * 0x10), 0x00200001);
+ nvkm_falcon_wr32(falcon, 0x50c + (BUSY_SLOT * 0x10), 0x00000002);
+ nvkm_falcon_wr32(falcon, 0x50c + (CLK_SLOT * 0x10), 0x00000003);
- nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm);
+ nvkm_timer_alarm(device->timer, 2000000000, &gpmu->alarm);
return 0;
}
@@ -202,26 +194,26 @@ gk20a_dvfs_data= {
.p_smooth = 1,
};
-static const struct nvkm_subdev_func
+static const struct nvkm_pmu_func
gk20a_pmu = {
.init = gk20a_pmu_init,
.fini = gk20a_pmu_fini,
- .dtor = gk20a_pmu_dtor,
+ .reset = gt215_pmu_reset,
};
int
gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- static const struct nvkm_pmu_func func = {};
struct gk20a_pmu *pmu;
if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
- pmu->base.func = &func;
*ppmu = &pmu->base;
- nvkm_subdev_ctor(&gk20a_pmu, device, index, &pmu->base.subdev);
+ nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base);
+
pmu->data = &gk20a_dvfs_data;
nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
+
return 0;
}
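
[Editorial aside] The DVFS worker above derives GPU load from the two free-running counters it samples and resets each period. A sketch of the percentage computation, which is all the div_u64() call is doing there:

#include <stdint.h>

static uint32_t utilization_pct(uint32_t busy, uint32_t total)
{
	if (!total)
		return 0;                           /* counters not running yet */
	return (uint32_t)(((uint64_t)busy * 100) / total);
}

/* e.g. utilization_pct(750, 1000) == 75 */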
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
new file mode 100644
index 000000000000..0b8a1cc4a0ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+static const struct nvkm_pmu_func
+gm20b_pmu = {
+ .reset = gt215_pmu_reset,
+};
+
+int
+gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+ return nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 2e2179a4ad17..096cba069f72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -4,6 +4,8 @@
#include <subdev/pmu.h>
#include <subdev/pmu/fuc/os.h>
+int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *,
+ int index, struct nvkm_pmu *);
int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
int index, struct nvkm_pmu **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index b02b868a6589..5076d1500f47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -1,3 +1,7 @@
nvkm-y += nvkm/subdev/secboot/base.o
+nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
+nvkm-y += nvkm/subdev/secboot/acr.o
+nvkm-y += nvkm/subdev/secboot/acr_r352.o
+nvkm-y += nvkm/subdev/secboot/acr_r361.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
new file mode 100644
index 000000000000..75dc06557877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr.h"
+
+#include <core/firmware.h>
+
+/**
+ * Convenience function to duplicate a firmware file in memory and check that
+ * it has the required minimum size.
+ */
+void *
+nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
+ size_t min_size)
+{
+ const struct firmware *fw;
+ void *blob;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev->device, name, &fw);
+ if (ret)
+ return ERR_PTR(ret);
+ if (fw->size < min_size) {
+ nvkm_error(subdev, "%s is smaller than expected size %zu\n",
+ name, min_size);
+ nvkm_firmware_put(fw);
+ return ERR_PTR(-EINVAL);
+ }
+ blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ nvkm_firmware_put(fw);
+ if (!blob)
+ return ERR_PTR(-ENOMEM);
+
+ return blob;
+}
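
[Editorial aside] nvkm_acr_load_firmware() follows the kernel's ERR_PTR convention, so callers branch on IS_ERR() rather than checking for NULL; acr_r352_prepare_hs_blob() below does exactly this. A sketch of the calling pattern:

	void *blob = nvkm_acr_load_firmware(subdev, "acr/ucode_load", 0);
	if (IS_ERR(blob))
		return PTR_ERR(blob);  /* propagates -ENOENT, -EINVAL or -ENOMEM */
	/* ... use blob, kfree(blob) when done ... */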
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
new file mode 100644
index 000000000000..97795b342b6f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NVKM_SECBOOT_ACR_H__
+#define __NVKM_SECBOOT_ACR_H__
+
+#include "priv.h"
+
+struct nvkm_acr;
+
+/**
+ * struct nvkm_acr_func - properties and functions specific to an ACR
+ *
+ * @load: make the ACR ready to run on the given secboot device
+ * @reset: reset the specified falcon
+ * @start: start the specified falcon (assumed to have been reset)
+ */
+struct nvkm_acr_func {
+ void (*dtor)(struct nvkm_acr *);
+ int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
+ int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
+ int (*load)(struct nvkm_acr *, struct nvkm_secboot *,
+ struct nvkm_gpuobj *, u64);
+ int (*reset)(struct nvkm_acr *, struct nvkm_secboot *,
+ enum nvkm_secboot_falcon);
+ int (*start)(struct nvkm_acr *, struct nvkm_secboot *,
+ enum nvkm_secboot_falcon);
+};
+
+/**
+ * struct nvkm_acr - instance of an ACR
+ *
+ * @boot_falcon: ID of the falcon that will perform secure boot
+ * @managed_falcons: bitfield of falcons managed by this ACR
+ * @start_address: virtual start address of the HS bootloader
+ */
+struct nvkm_acr {
+ const struct nvkm_acr_func *func;
+ const struct nvkm_subdev *subdev;
+
+ enum nvkm_secboot_falcon boot_falcon;
+ unsigned long managed_falcons;
+ u32 start_address;
+};
+
+void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
+
+struct nvkm_acr *acr_r352_new(unsigned long);
+struct nvkm_acr *acr_r361_new(unsigned long);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
new file mode 100644
index 000000000000..1aa37ea18580
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -0,0 +1,936 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr_r352.h"
+
+#include <core/gpuobj.h>
+#include <core/firmware.h>
+#include <engine/falcon.h>
+
+/**
+ * struct hsf_fw_header - HS firmware descriptor
+ * @sig_dbg_offset: offset of the debug signature
+ * @sig_dbg_size: size of the debug signature
+ * @sig_prod_offset: offset of the production signature
+ * @sig_prod_size: size of the production signature
+ * @patch_loc: offset of the offset (sic) of where the signature is
+ * @patch_sig: offset of the offset (sic) to add to sig_*_offset
+ * @hdr_offset: offset of the load header (see struct hs_load_header)
+ * @hdr_size: size of above header
+ *
+ * This structure is embedded in the HS firmware image at
+ * hs_bin_hdr.header_offset.
+ */
+struct hsf_fw_header {
+ u32 sig_dbg_offset;
+ u32 sig_dbg_size;
+ u32 sig_prod_offset;
+ u32 sig_prod_size;
+ u32 patch_loc;
+ u32 patch_sig;
+ u32 hdr_offset;
+ u32 hdr_size;
+};
+
+/**
+ * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
+ * @signature: 16B signature for secure code. 0s if no secure code
+ * @ctx_dma: DMA context to be used by BL while loading code/data
+ * @code_dma_base: 256B-aligned Physical FB Address where code is located
+ * (falcon's $xcbase register)
+ * @non_sec_code_off: offset from code_dma_base where the non-secure code is
+ * located. The offset must be multiple of 256 to help perf
+ * @non_sec_code_size: the size of the nonSecure code part.
+ * @sec_code_off: offset from code_dma_base where the secure code is
+ * located. The offset must be multiple of 256 to help perf
+ * @sec_code_size: size of the secure code part
+ * @code_entry_point: code entry point which will be invoked by BL after
+ * code is loaded.
+ * @data_dma_base: 256B aligned Physical FB Address where data is located.
+ * (falcon's $xdbase register)
+ * @data_size: size of data block. Should be multiple of 256B
+ *
+ * Structure used by the bootloader to load the rest of the code. This has
+ * to be filled by host and copied into DMEM at offset provided in the
+ * hsflcn_bl_desc.bl_desc_dmem_load_off.
+ */
+struct acr_r352_flcn_bl_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u32 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 code_dma_base1;
+ u32 data_dma_base1;
+};
+
+/**
+ * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
+ */
+static void
+acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *_img, u64 wpr_addr,
+ void *_desc)
+{
+ struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
+ struct acr_r352_flcn_bl_desc *desc = _desc;
+ const struct ls_ucode_img_desc *pdesc = &_img->ucode_desc;
+ u64 base, addr_code, addr_data;
+
+ base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset;
+ addr_code = (base + pdesc->app_resident_code_offset) >> 8;
+ addr_data = (base + pdesc->app_resident_data_offset) >> 8;
+
+ desc->ctx_dma = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base = lower_32_bits(addr_code);
+ desc->code_dma_base1 = upper_32_bits(addr_code);
+ desc->non_sec_code_off = pdesc->app_resident_code_offset;
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = lower_32_bits(addr_data);
+ desc->data_dma_base1 = upper_32_bits(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+}
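/* Editorial aside (not part of the patch): the >>8 plus
 * lower/upper_32_bits() packing above stores a 256-byte-aligned FB address
 * into the two 32-bit descriptor fields. Standalone sketch with stand-in
 * helpers: */
#include <stdint.h>

static void pack_dma_base(uint64_t addr, uint32_t *base, uint32_t *base1)
{
	uint64_t shifted = addr >> 8;            /* 256-byte units */

	*base  = (uint32_t)shifted;              /* lower_32_bits() */
	*base1 = (uint32_t)(shifted >> 32);      /* upper_32_bits() */
}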
+
+
+/**
+ * struct hsflcn_acr_desc - data section of the HS firmware
+ *
+ * This header is to be copied at the beginning of DMEM by the HS bootloader.
+ *
+ * @signature: signature of ACR ucode
+ * @wpr_region_id: region ID holding the WPR header and its details
+ * @wpr_offset: offset from the WPR region holding the wpr header
+ * @regions: region descriptors
+ * @nonwpr_ucode_blob_size: size of LS blob
+ * @nonwpr_ucode_blob_start: FB location of the LS blob
+ */
+struct hsflcn_acr_desc {
+ union {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ } ucode_reserved_space;
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_mem_range;
+#define FLCN_ACR_MAX_REGIONS 2
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ } region_props[FLCN_ACR_MAX_REGIONS];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+
+/*
+ * Low-secure blob creation
+ */
+
+/**
+ * ls_ucode_img_load() - create a lsf_ucode_img and load it
+ */
+struct ls_ucode_img *
+acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
+ enum nvkm_secboot_falcon falcon_id)
+{
+ const struct nvkm_subdev *subdev = acr->base.subdev;
+ struct ls_ucode_img_r352 *img;
+ int ret;
+
+ img = kzalloc(sizeof(*img), GFP_KERNEL);
+ if (!img)
+ return ERR_PTR(-ENOMEM);
+
+ img->base.falcon_id = falcon_id;
+
+ ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
+
+ if (ret) {
+ kfree(img->base.ucode_data);
+ kfree(img->base.sig);
+ kfree(img);
+ return ERR_PTR(ret);
+ }
+
+ /* Check that the signature size matches our expectations... */
+ if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
+ nvkm_error(subdev, "invalid signature size for %s falcon!\n",
+ nvkm_secboot_falcon_name[falcon_id]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Copy signature to the right place */
+ memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
+
+ /* not needed? the signature should already have the right value */
+ img->lsb_header.signature.falcon_id = falcon_id;
+
+ return &img->base;
+}
+
+#define LSF_LSB_HEADER_ALIGN 256
+#define LSF_BL_DATA_ALIGN 256
+#define LSF_BL_DATA_SIZE_ALIGN 256
+#define LSF_BL_CODE_SIZE_ALIGN 256
+#define LSF_UCODE_DATA_ALIGN 4096
+
+/**
+ * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
+ * @acr: ACR to use
+ * @img: image to generate for
+ * @offset: offset in the WPR region where this image starts
+ *
+ * Allocate space in the WPR area from offset and write the WPR and LSB headers
+ * accordingly.
+ *
+ * Return: offset at the end of this image.
+ */
+static u32
+acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
+ struct ls_ucode_img_r352 *img, u32 offset)
+{
+ struct ls_ucode_img *_img = &img->base;
+ struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
+ struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
+ struct ls_ucode_img_desc *desc = &_img->ucode_desc;
+ const struct acr_r352_ls_func *func =
+ acr->func->ls_func[_img->falcon_id];
+
+ /* Fill WPR header */
+ whdr->falcon_id = _img->falcon_id;
+ whdr->bootstrap_owner = acr->base.boot_falcon;
+ whdr->status = LSF_IMAGE_STATUS_COPY;
+
+ /* Skip bootstrapping falcons started by someone else than ACR */
+ if (acr->lazy_bootstrap & BIT(_img->falcon_id))
+ whdr->lazy_bootstrap = 1;
+
+ /* Align, save off, and include an LSB header size */
+ offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
+ whdr->lsb_offset = offset;
+ offset += sizeof(*lhdr);
+
+ /*
+ * Align, save off, and include the original (static) ucode
+ * image size
+ */
+ offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
+ lhdr->ucode_off = offset;
+ offset += _img->ucode_size;
+
+ /*
+ * For falcons that use a boot loader (BL), we append a loader
+ * desc structure on the end of the ucode image and consider
+ * this the boot loader data. The host will then copy the loader
+ * desc args to this space within the WPR region (before locking
+ * down) and the HS bin will then copy them to DMEM 0 for the
+ * loader.
+ */
+ lhdr->bl_code_size = ALIGN(desc->bootloader_size,
+ LSF_BL_CODE_SIZE_ALIGN);
+ lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
+ LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
+ lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
+ lhdr->bl_code_size - lhdr->ucode_size;
+ /*
+ * Though the BL is located at 0th offset of the image, the VA
+ * is different to make sure that it doesn't collide the actual
+ * OS VA range
+ */
+ lhdr->bl_imem_off = desc->bootloader_imem_offset;
+ lhdr->app_code_off = desc->app_start_offset +
+ desc->app_resident_code_offset;
+ lhdr->app_code_size = desc->app_resident_code_size;
+ lhdr->app_data_off = desc->app_start_offset +
+ desc->app_resident_data_offset;
+ lhdr->app_data_size = desc->app_resident_data_size;
+
+ lhdr->flags = func->lhdr_flags;
+ if (_img->falcon_id == acr->base.boot_falcon)
+ lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
+
+ /* Align and save off BL descriptor size */
+ lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
+
+ /*
+ * Align, save off, and include the additional BL data
+ */
+ offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
+ lhdr->bl_data_off = offset;
+ offset += lhdr->bl_data_size;
+
+ return offset;
+}
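/* Editorial aside (not part of the patch): the offset bookkeeping above
 * always aligns first, records the position, then advances by the region
 * size. Standalone sketch using the same alignment constants: */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int layout_image(unsigned int offset, unsigned int lsb_size,
				 unsigned int ucode_size,
				 unsigned int bl_data_size)
{
	offset = ALIGN_UP(offset, 256);      /* LSF_LSB_HEADER_ALIGN */
	/* -> whdr->lsb_offset */
	offset += lsb_size;

	offset = ALIGN_UP(offset, 4096);     /* LSF_UCODE_DATA_ALIGN */
	/* -> lhdr->ucode_off */
	offset += ucode_size;

	offset = ALIGN_UP(offset, 256);      /* LSF_BL_DATA_ALIGN */
	/* -> lhdr->bl_data_off */
	offset += ALIGN_UP(bl_data_size, 256);

	return offset;                       /* start of the next image */
}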
+
+/**
+ * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
+ */
+int
+acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
+{
+ struct ls_ucode_img_r352 *img;
+ struct list_head *l;
+ u32 count = 0;
+ u32 offset;
+
+ /* Count the number of images to manage */
+ list_for_each(l, imgs)
+ count++;
+
+ /*
+ * Start with an array of WPR headers at the base of the WPR.
+ * The expectation here is that the secure falcon will do a single DMA
+ * read of this array and cache it internally so it's ok to pack these.
+ * Also, we add 1 to the falcon count to indicate the end of the array.
+ */
+ offset = sizeof(img->wpr_header) * (count + 1);
+
+ /*
+ * Walk the managed falcons, accounting for the LSB structs
+ * as well as the ucode images.
+ */
+ list_for_each_entry(img, imgs, base.node) {
+ offset = acr_r352_ls_img_fill_headers(acr, img, offset);
+ }
+
+ return offset;
+}
+
+/**
+ * acr_r352_ls_write_wpr - write the WPR blob contents
+ */
+int
+acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
+ struct nvkm_gpuobj *wpr_blob, u32 wpr_addr)
+{
+ struct ls_ucode_img *_img;
+ u32 pos = 0;
+
+ nvkm_kmap(wpr_blob);
+
+ list_for_each_entry(_img, imgs, node) {
+ struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
+ const struct acr_r352_ls_func *ls_func =
+ acr->func->ls_func[_img->falcon_id];
+ u8 gdesc[ls_func->bl_desc_size];
+
+ nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
+ sizeof(img->wpr_header));
+
+ nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
+ &img->lsb_header, sizeof(img->lsb_header));
+
+ /* Generate and write BL descriptor */
+ memset(gdesc, 0, ls_func->bl_desc_size);
+ ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
+
+ nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
+ gdesc, ls_func->bl_desc_size);
+
+ /* Copy ucode */
+ nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
+ _img->ucode_data, _img->ucode_size);
+
+ pos += sizeof(img->wpr_header);
+ }
+
+ nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
+
+ nvkm_done(wpr_blob);
+
+ return 0;
+}
+
+/* Both size and address of WPR need to be 128K-aligned */
+#define WPR_ALIGNMENT 0x20000
+/**
+ * acr_r352_prepare_ls_blob() - prepare the LS blob
+ *
+ * For each securely managed falcon, load the FW, signatures and bootloaders and
+ * prepare a ucode blob. Then, compute the offsets in the WPR region for each
+ * blob, and finally write the headers and ucode blobs into a GPU object that
+ * will be copied into the WPR region by the HS firmware.
+ */
+static int
+acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
+{
+ const struct nvkm_subdev *subdev = acr->base.subdev;
+ struct list_head imgs;
+ struct ls_ucode_img *img, *t;
+ unsigned long managed_falcons = acr->base.managed_falcons;
+ int managed_count = 0;
+ u32 image_wpr_size;
+ int falcon_id;
+ int ret;
+
+ INIT_LIST_HEAD(&imgs);
+
+ /* Load all LS blobs */
+ for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
+ struct ls_ucode_img *img;
+
+ img = acr->func->ls_ucode_img_load(acr, falcon_id);
+ if (IS_ERR(img)) {
+ ret = PTR_ERR(img);
+ goto cleanup;
+ }
+
+ list_add_tail(&img->node, &imgs);
+ managed_count++;
+ }
+
+ /*
+ * Fill the WPR and LSF headers with the right offsets and compute
+ * required WPR size
+ */
+ image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
+ image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
+
+ /* Allocate GPU object that will contain the WPR region */
+ ret = nvkm_gpuobj_new(subdev->device, image_wpr_size, WPR_ALIGNMENT,
+ false, NULL, &acr->ls_blob);
+ if (ret)
+ goto cleanup;
+
+ nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
+ managed_count, image_wpr_size);
+
+ /* If WPR address and size are not fixed, set them to fit the LS blob */
+ if (wpr_size == 0) {
+ wpr_addr = acr->ls_blob->addr;
+ wpr_size = image_wpr_size;
+ /*
+ * But if the WPR region is set by the bootloader, it is illegal for
+ * the HS blob to be larger than this region.
+ */
+ } else if (image_wpr_size > wpr_size) {
+ nvkm_error(subdev, "WPR region too small for FW blob!\n");
+ nvkm_error(subdev, "required: %dB\n", image_wpr_size);
+ nvkm_error(subdev, "available: %dB\n", wpr_size);
+ ret = -ENOSPC;
+ goto cleanup;
+ }
+
+ /* Write LS blob */
+ ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
+ if (ret)
+ nvkm_gpuobj_del(&acr->ls_blob);
+
+cleanup:
+ list_for_each_entry_safe(img, t, &imgs, node) {
+ kfree(img->ucode_data);
+ kfree(img->sig);
+ kfree(img);
+ }
+
+ return ret;
+}
+
+
+
+
+/**
+ * acr_r352_hsf_patch_signature() - patch HS blob with correct signature
+ */
+static void
+acr_r352_hsf_patch_signature(struct nvkm_secboot *sb, void *acr_image)
+{
+ struct fw_bin_header *hsbin_hdr = acr_image;
+ struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
+ void *hs_data = acr_image + hsbin_hdr->data_offset;
+ void *sig;
+ u32 sig_size;
+
+ /* Falcon in debug or production mode? */
+ if (sb->boot_falcon->debug) {
+ sig = acr_image + fw_hdr->sig_dbg_offset;
+ sig_size = fw_hdr->sig_dbg_size;
+ } else {
+ sig = acr_image + fw_hdr->sig_prod_offset;
+ sig_size = fw_hdr->sig_prod_size;
+ }
+
+ /* Patch signature */
+ memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
+}
+
+static void
+acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
+ struct hsflcn_acr_desc *desc)
+{
+ struct nvkm_gpuobj *ls_blob = acr->ls_blob;
+
+ /* WPR region information if WPR is not fixed */
+ if (sb->wpr_size == 0) {
+ u32 wpr_start = ls_blob->addr;
+ u32 wpr_end = wpr_start + ls_blob->size;
+
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ } else {
+ desc->ucode_blob_base = ls_blob->addr;
+ desc->ucode_blob_size = ls_blob->size;
+ }
+}
+
+static void
+acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
+ u64 offset)
+{
+ struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
+ u64 addr_code, addr_data;
+
+ addr_code = offset >> 8;
+ addr_data = (offset + hdr->data_dma_base) >> 8;
+
+ bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
+ bl_desc->code_dma_base = lower_32_bits(addr_code);
+ bl_desc->non_sec_code_off = hdr->non_sec_code_off;
+ bl_desc->non_sec_code_size = hdr->non_sec_code_size;
+ bl_desc->sec_code_off = hdr->app[0].sec_code_off;
+ bl_desc->sec_code_size = hdr->app[0].sec_code_size;
+ bl_desc->code_entry_point = 0;
+ bl_desc->data_dma_base = lower_32_bits(addr_data);
+ bl_desc->data_size = hdr->data_size;
+}
+
+/**
+ * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
+ *
+ * @sb: secure boot instance to prepare for
+ * @fw: name of the HS firmware to load
+ * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
+ * @load_header: pointer to the load header to fill for this firmware
+ * @patch: whether we should patch the HS descriptor (only for HS loaders)
+ */
+static int
+acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
+ const char *fw, struct nvkm_gpuobj **blob,
+ struct hsf_load_header *load_header, bool patch)
+{
+ struct nvkm_subdev *subdev = &sb->subdev;
+ void *acr_image;
+ struct fw_bin_header *hsbin_hdr;
+ struct hsf_fw_header *fw_hdr;
+ struct hsf_load_header *load_hdr;
+ void *acr_data;
+ int ret;
+
+ acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
+ if (IS_ERR(acr_image))
+ return PTR_ERR(acr_image);
+
+ hsbin_hdr = acr_image;
+ fw_hdr = acr_image + hsbin_hdr->header_offset;
+ load_hdr = acr_image + fw_hdr->hdr_offset;
+ acr_data = acr_image + hsbin_hdr->data_offset;
+
+ /* Patch signature */
+ acr_r352_hsf_patch_signature(sb, acr_image);
+
+ /* Patch descriptor with WPR information? */
+ if (patch) {
+ struct hsflcn_acr_desc *desc;
+
+ desc = acr_data + load_hdr->data_dma_base;
+ acr_r352_fixup_hs_desc(acr, sb, desc);
+ }
+
+ if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
+ nvkm_error(subdev, "more apps (%d) than supported (%d)!",
+ load_hdr->num_apps, ACR_R352_MAX_APPS);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ memcpy(load_header, load_hdr, sizeof(*load_header) +
+ (sizeof(load_hdr->app[0]) * load_hdr->num_apps));
+
+ /* Create ACR blob and copy HS data to it */
+ ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
+ 0x1000, false, NULL, blob);
+ if (ret)
+ goto cleanup;
+
+ nvkm_kmap(*blob);
+ nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
+ nvkm_done(*blob);
+
+cleanup:
+ kfree(acr_image);
+
+ return ret;
+}
+
+static int
+acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
+{
+ const struct nvkm_subdev *subdev = acr->base.subdev;
+ struct fw_bin_header *hdr;
+ struct fw_bl_desc *hsbl_desc;
+
+ acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
+ if (IS_ERR(acr->hsbl_blob)) {
+ int ret = PTR_ERR(acr->hsbl_blob);
+
+ acr->hsbl_blob = NULL;
+ return ret;
+ }
+
+ hdr = acr->hsbl_blob;
+ hsbl_desc = acr->hsbl_blob + hdr->header_offset;
+
+ /* virtual start address for boot vector */
+ acr->base.start_address = hsbl_desc->start_tag << 8;
+
+ return 0;
+}
+
+/**
+ * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
+ *
+ * This includes the LS blob, HS ucode loading blob, and HS bootloader.
+ *
+ * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
+ */
+int
+acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
+{
+ int ret;
+
+ /* Firmware already loaded? */
+ if (acr->firmware_ok)
+ return 0;
+
+ /* Load and prepare the managed falcon's firmwares */
+ ret = acr_r352_prepare_ls_blob(acr, sb->wpr_addr, sb->wpr_size);
+ if (ret)
+ return ret;
+
+ /* Load the HS firmware that will load the LS firmwares */
+ if (!acr->load_blob) {
+ ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
+ &acr->load_blob,
+ &acr->load_bl_header, true);
+ if (ret)
+ return ret;
+ }
+
+ /* If the ACR region is dynamically programmed, we need an unload FW */
+ if (sb->wpr_size == 0) {
+ ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
+ &acr->unload_blob,
+ &acr->unload_bl_header, false);
+ if (ret)
+ return ret;
+ }
+
+ /* Load the HS firmware bootloader */
+ if (!acr->hsbl_blob) {
+ ret = acr_r352_prepare_hsbl_blob(acr);
+ if (ret)
+ return ret;
+ }
+
+ acr->firmware_ok = true;
+ nvkm_debug(&sb->subdev, "LS blob successfully created\n");
+
+ return 0;
+}
+
+/**
+ * acr_r352_load() - prepare HS falcon to run the specified blob, mapped
+ * at GPU address offset.
+ */
+static int
+acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
+ struct nvkm_gpuobj *blob, u64 offset)
+{
+ struct acr_r352 *acr = acr_r352(_acr);
+ struct nvkm_falcon *falcon = sb->boot_falcon;
+ struct fw_bin_header *hdr = acr->hsbl_blob;
+ struct fw_bl_desc *hsbl_desc = acr->hsbl_blob + hdr->header_offset;
+ void *blob_data = acr->hsbl_blob + hdr->data_offset;
+ void *hsbl_code = blob_data + hsbl_desc->code_off;
+ void *hsbl_data = blob_data + hsbl_desc->data_off;
+ u32 code_size = ALIGN(hsbl_desc->code_size, 256);
+ const struct hsf_load_header *load_hdr;
+ const u32 bl_desc_size = acr->func->hs_bl_desc_size;
+ u8 bl_desc[bl_desc_size];
+
+ /* Find the bootloader descriptor for our blob and copy it */
+ if (blob == acr->load_blob) {
+ load_hdr = &acr->load_bl_header;
+ } else if (blob == acr->unload_blob) {
+ load_hdr = &acr->unload_bl_header;
+ } else {
+ nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Copy HS bootloader data
+ */
+ nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);
+
+ /* Copy HS bootloader code to end of IMEM */
+ nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
+ code_size, hsbl_desc->start_tag, 0, false);
+
+ /* Generate the BL header */
+ memset(bl_desc, 0, bl_desc_size);
+ acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
+
+ /*
+ * Copy HS BL header where the HS descriptor expects it to be
+ */
+ nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
+ bl_desc_size, 0);
+
+ return 0;
+}
+
+static int
+acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
+{
+ int i;
+
+ /* Run the unload blob to unprotect the WPR region */
+ if (acr->unload_blob && sb->wpr_set) {
+ int ret;
+
+ nvkm_debug(&sb->subdev, "running HS unload blob\n");
+ ret = sb->func->run_blob(sb, acr->unload_blob);
+ if (ret)
+ return ret;
+ nvkm_debug(&sb->subdev, "HS unload blob completed\n");
+ }
+
+ for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
+ acr->falcon_state[i] = NON_SECURE;
+
+ sb->wpr_set = false;
+
+ return 0;
+}
+
+static int
+acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
+{
+ int ret;
+
+ if (sb->wpr_set)
+ return 0;
+
+ /* Make sure all blobs are ready */
+ ret = acr_r352_load_blobs(acr, sb);
+ if (ret)
+ return ret;
+
+ nvkm_debug(&sb->subdev, "running HS load blob\n");
+ ret = sb->func->run_blob(sb, acr->load_blob);
+ /* clear halt interrupt */
+ nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
+ if (ret)
+ return ret;
+ nvkm_debug(&sb->subdev, "HS load blob completed\n");
+
+ sb->wpr_set = true;
+
+ return 0;
+}
+
+/*
+ * acr_r352_reset() - execute secure boot from the prepared state
+ *
+ * Load the HS bootloader and ask the falcon to run it. This will in turn
+ * load the HS firmware and run it, so once the falcon stops all the managed
+ * falcons should have their LS firmware loaded and be ready to run.
+ */
+static int
+acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
+ enum nvkm_secboot_falcon falcon)
+{
+ struct acr_r352 *acr = acr_r352(_acr);
+ int ret;
+
+ /*
+ * Dummy GM200 implementation: perform secure boot each time we are
+ * called on FECS. Since only FECS and GPCCS are managed and started
+ * together, this ought to be safe.
+ *
+ * Once we have proper PMU firmware and support, this will be changed
+ * to a proper call to the PMU method.
+ */
+ if (falcon != NVKM_SECBOOT_FALCON_FECS)
+ goto end;
+
+ ret = acr_r352_shutdown(acr, sb);
+ if (ret)
+ return ret;
+
+ ret = acr_r352_bootstrap(acr, sb);
+ if (ret)
+ return ret;
+
+end:
+ acr->falcon_state[falcon] = RESET;
+ return 0;
+}
+
+static int
+acr_r352_start(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
+ enum nvkm_secboot_falcon falcon)
+{
+ struct acr_r352 *acr = acr_r352(_acr);
+ const struct nvkm_subdev *subdev = &sb->subdev;
+ int base;
+
+ switch (falcon) {
+ case NVKM_SECBOOT_FALCON_FECS:
+ base = 0x409000;
+ break;
+ case NVKM_SECBOOT_FALCON_GPCCS:
+ base = 0x41a000;
+ break;
+ default:
+ nvkm_error(subdev, "cannot start unhandled falcon!\n");
+ return -EINVAL;
+ }
+
+ nvkm_wr32(subdev->device, base + 0x130, 0x00000002);
+ acr->falcon_state[falcon] = RUNNING;
+
+ return 0;
+}
+
+static int
+acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
+{
+ struct acr_r352 *acr = acr_r352(_acr);
+
+ return acr_r352_shutdown(acr, sb);
+}
+
+static void
+acr_r352_dtor(struct nvkm_acr *_acr)
+{
+ struct acr_r352 *acr = acr_r352(_acr);
+
+ nvkm_gpuobj_del(&acr->unload_blob);
+
+ kfree(acr->hsbl_blob);
+ nvkm_gpuobj_del(&acr->load_blob);
+ nvkm_gpuobj_del(&acr->ls_blob);
+
+ kfree(acr);
+}
+
+const struct acr_r352_ls_func
+acr_r352_ls_fecs_func = {
+ .load = acr_ls_ucode_load_fecs,
+ .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+};
+
+const struct acr_r352_ls_func
+acr_r352_ls_gpccs_func = {
+ .load = acr_ls_ucode_load_gpccs,
+ .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+};
+
+const struct acr_r352_func
+acr_r352_func = {
+ .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+ .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
+ .ls_fill_headers = acr_r352_ls_fill_headers,
+ .ls_write_wpr = acr_r352_ls_write_wpr,
+ .ls_func = {
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
+ },
+};
+
+static const struct nvkm_acr_func
+acr_r352_base_func = {
+ .dtor = acr_r352_dtor,
+ .fini = acr_r352_fini,
+ .load = acr_r352_load,
+ .reset = acr_r352_reset,
+ .start = acr_r352_start,
+};
+
+struct nvkm_acr *
+acr_r352_new_(const struct acr_r352_func *func,
+ enum nvkm_secboot_falcon boot_falcon,
+ unsigned long managed_falcons)
+{
+ struct acr_r352 *acr;
+
+ acr = kzalloc(sizeof(*acr), GFP_KERNEL);
+ if (!acr)
+ return ERR_PTR(-ENOMEM);
+
+ acr->base.boot_falcon = boot_falcon;
+ acr->base.managed_falcons = managed_falcons;
+ acr->base.func = &acr_r352_base_func;
+ acr->func = func;
+
+ return &acr->base;
+}
+
+struct nvkm_acr *
+acr_r352_new(unsigned long managed_falcons)
+{
+ return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
+ managed_falcons);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
new file mode 100644
index 000000000000..ad5923b0fd3c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NVKM_SECBOOT_ACR_R352_H__
+#define __NVKM_SECBOOT_ACR_R352_H__
+
+#include "acr.h"
+#include "ls_ucode.h"
+
+struct ls_ucode_img;
+
+#define ACR_R352_MAX_APPS 8
+
+/*
+ *
+ * LS blob structures
+ *
+ */
+
+/**
+ * struct acr_r352_lsf_lsb_header - LS firmware header
+ * @signature: signature to verify the firmware against
+ * @ucode_off: offset of the ucode blob in the WPR region. The ucode
+ * blob contains the bootloader, code and data of the
+ * LS falcon
+ * @ucode_size: size of the ucode blob, including bootloader
+ * @data_size: size of the ucode blob data
+ * @bl_code_size: size of the bootloader code
+ * @bl_imem_off: offset in imem of the bootloader
+ * @bl_data_off: offset of the bootloader data in WPR region
+ * @bl_data_size: size of the bootloader data
+ * @app_code_off: offset of the app code relative to ucode_off
+ * @app_code_size: size of the app code
+ * @app_data_off: offset of the app data relative to ucode_off
+ * @app_data_size: size of the app data
+ * @flags: flags for the secure bootloader
+ *
+ * This structure is written into the WPR region for each managed falcon. Each
+ * instance is referenced by the lsb_offset member of the corresponding
+ * lsf_wpr_header.
+ */
+struct acr_r352_lsf_lsb_header {
+ /**
+ * LS falcon signatures
+ * @prd_keys: signature to use in production mode
+	 * @dbg_keys: signature to use in debug mode
+	 * @b_prd_present: whether the production key is present
+	 * @b_dbg_present: whether the debug key is present
+ * @falcon_id: ID of the falcon the ucode applies to
+ */
+ struct {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+ } signature;
+ u32 ucode_off;
+ u32 ucode_size;
+ u32 data_size;
+ u32 bl_code_size;
+ u32 bl_imem_off;
+ u32 bl_data_off;
+ u32 bl_data_size;
+ u32 app_code_off;
+ u32 app_code_size;
+ u32 app_data_off;
+ u32 app_data_size;
+ u32 flags;
+#define LSF_FLAG_LOAD_CODE_AT_0 1
+#define LSF_FLAG_DMACTL_REQ_CTX 4
+#define LSF_FLAG_FORCE_PRIV_LOAD 8
+};
+
+/**
+ * struct acr_r352_lsf_wpr_header - LS blob WPR Header
+ * @falcon_id: LS falcon ID
+ * @lsb_offset: offset of the lsb_lsf_header in the WPR region
+ * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
+ * @lazy_bootstrap: skip bootstrapping by ACR
+ * @status: bootstrapping status
+ *
+ * An array of these is written at the beginning of the WPR region, one for
+ * each managed falcon. The array is terminated by an instance whose falcon_id
+ * is LSF_FALCON_ID_INVALID.
+ */
+struct acr_r352_lsf_wpr_header {
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+ u32 status;
+#define LSF_IMAGE_STATUS_NONE 0
+#define LSF_IMAGE_STATUS_COPY 1
+#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
+#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
+#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
+#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
+#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
+};
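A sketch of how the terminated array could be walked (wpr_headers_dump is a hypothetical debug helper; the terminator used here is NVKM_SECBOOT_FALCON_INVALID, the value the LS writer stores after the last entry):

/* Sketch: iterate WPR headers until the terminating entry */
static void
wpr_headers_dump(struct nvkm_subdev *subdev,
		 const struct acr_r352_lsf_wpr_header *hdr)
{
	for (; hdr->falcon_id != NVKM_SECBOOT_FALCON_INVALID; hdr++)
		nvkm_debug(subdev, "falcon %d: LSB header at %08x\n",
			   hdr->falcon_id, hdr->lsb_offset);
}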
+
+/**
+ * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
+ */
+struct ls_ucode_img_r352 {
+ struct ls_ucode_img base;
+
+ struct acr_r352_lsf_wpr_header wpr_header;
+ struct acr_r352_lsf_lsb_header lsb_header;
+};
+#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
+
+
+/*
+ * HS blob structures
+ */
+
+struct hsf_load_header_app {
+ u32 sec_code_off;
+ u32 sec_code_size;
+};
+
+/**
+ * struct hsf_load_header - HS firmware load header
+ */
+struct hsf_load_header {
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 num_apps;
+ struct hsf_load_header_app app[0];
+};
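Because app[] is a zero-sized trailing array, the effective size of a load header depends on num_apps; a consumer would compute it along these lines (illustrative helper, not part of this patch):

/* Sketch: bytes occupied by a load header plus its app entries */
static inline u32
hsf_load_header_size(const struct hsf_load_header *hdr)
{
	return sizeof(*hdr) + hdr->num_apps * sizeof(hdr->app[0]);
}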
+
+/**
+ * struct acr_r352_ls_func - manages a single LS firmware
+ *
+ * @load: load the external firmware into a ls_ucode_img
+ * @generate_bl_desc: function called on a block of bl_desc_size to generate the
+ * proper bootloader descriptor for this LS firmware
+ * @bl_desc_size: size of the bootloader descriptor
+ * @lhdr_flags: LS flags
+ */
+struct acr_r352_ls_func {
+ int (*load)(const struct nvkm_subdev *, struct ls_ucode_img *);
+ void (*generate_bl_desc)(const struct nvkm_acr *,
+ const struct ls_ucode_img *, u64, void *);
+ u32 bl_desc_size;
+ u32 lhdr_flags;
+};
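bl_desc_size and generate_bl_desc work as a pair: the caller allocates an opaque buffer of the advertised size and lets the per-falcon hook fill it in. A minimal sketch (ls_bl_desc_build is a hypothetical helper):

/* Sketch: build a BL descriptor for one LS image */
static void *
ls_bl_desc_build(const struct nvkm_acr *acr,
		 const struct acr_r352_ls_func *func,
		 const struct ls_ucode_img *img, u64 wpr_addr)
{
	void *desc = kzalloc(func->bl_desc_size, GFP_KERNEL);

	if (desc)
		func->generate_bl_desc(acr, img, wpr_addr, desc);
	return desc;
}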
+
+struct acr_r352;
+
+/**
+ * struct acr_r352_func - manages nuances between ACR versions
+ *
+ * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate
+ * the proper HS bootloader descriptor
+ * @hs_bl_desc_size: size of the HS bootloader descriptor
+ */
+struct acr_r352_func {
+ void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
+ u64);
+ u32 hs_bl_desc_size;
+
+ struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
+ enum nvkm_secboot_falcon);
+ int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
+ int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
+ struct nvkm_gpuobj *, u32);
+
+ const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
+};
+
+/**
+ * struct acr_r352 - ACR data for driver release 352 (and beyond)
+ */
+struct acr_r352 {
+ struct nvkm_acr base;
+ const struct acr_r352_func *func;
+
+ /*
+	 * HS FW - locks the WPR region (dGPU only) and loads the LS FWs.
+	 * On Tegra, the HS FW copies the LS blob into the fixed WPR instead.
+ */
+ struct nvkm_gpuobj *load_blob;
+ struct {
+ struct hsf_load_header load_bl_header;
+ struct hsf_load_header_app __load_apps[ACR_R352_MAX_APPS];
+ };
+
+ /* HS FW - unlock WPR region (dGPU only) */
+ struct nvkm_gpuobj *unload_blob;
+ struct {
+ struct hsf_load_header unload_bl_header;
+ struct hsf_load_header_app __unload_apps[ACR_R352_MAX_APPS];
+ };
+
+ /* HS bootloader */
+ void *hsbl_blob;
+
+ /* LS FWs, to be loaded by the HS ACR */
+ struct nvkm_gpuobj *ls_blob;
+
+ /* Firmware already loaded? */
+ bool firmware_ok;
+
+ /* Falcons to lazy-bootstrap */
+ u32 lazy_bootstrap;
+
+ /* To keep track of the state of all managed falcons */
+ enum {
+		/* In non-secure state, no firmware loaded, no privileges */
+ NON_SECURE = 0,
+ /* In low-secure mode and ready to be started */
+ RESET,
+ /* In low-secure mode and running */
+ RUNNING,
+ } falcon_state[NVKM_SECBOOT_FALCON_END];
+};
+#define acr_r352(acr) container_of(acr, struct acr_r352, base)
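Like the ls_ucode_img_r352() macro above, acr_r352() is the usual container_of downcast; a base-class hook would use it as in this illustrative snippet (acr_r352_get_func is hypothetical):

/* Sketch: recover the r352 object from the base ACR pointer */
static const struct acr_r352_func *
acr_r352_get_func(struct nvkm_acr *base)
{
	return acr_r352(base)->func;
}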
+
+struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
+ enum nvkm_secboot_falcon, unsigned long);
+
+struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
+ enum nvkm_secboot_falcon);
+int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
+int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
+ struct nvkm_gpuobj *, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
new file mode 100644
index 000000000000..f0aff1d98474
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr_r352.h"
+
+#include <engine/falcon.h>
+
+/**
+ * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
+ * @signature: 16B signature for secure code. 0s if no secure code
+ * @ctx_dma: DMA context to be used by BL while loading code/data
+ * @code_dma_base: 256B-aligned Physical FB Address where code is located
+ * (falcon's $xcbase register)
+ * @non_sec_code_off: offset from code_dma_base where the non-secure code is
+ * located. The offset must be a multiple of 256 to help perf
+ * @non_sec_code_size: the size of the non-secure code part.
+ * @sec_code_off: offset from code_dma_base where the secure code is
+ * located. The offset must be a multiple of 256 to help perf
+ * @sec_code_size: the size of the secure code part.
+ * @code_entry_point: code entry point which will be invoked by BL after
+ * code is loaded.
+ * @data_dma_base: 256B aligned Physical FB Address where data is located.
+ * (falcon's $xdbase register)
+ * @data_size: size of data block. Should be multiple of 256B
+ *
+ * Structure used by the bootloader to load the rest of the code. This has
+ * to be filled by the host and copied into DMEM at the offset provided in the
+ * hsflcn_bl_desc.bl_desc_dmem_load_off.
+ */
+struct acr_r361_flcn_bl_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ struct flcn_u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ struct flcn_u64 data_dma_base;
+ u32 data_size;
+};
+
+static void
+acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *_img, u64 wpr_addr,
+ void *_desc)
+{
+ struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
+ struct acr_r361_flcn_bl_desc *desc = _desc;
+ const struct ls_ucode_img_desc *pdesc = &img->base.ucode_desc;
+ u64 base, addr_code, addr_data;
+
+ base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset;
+ addr_code = base + pdesc->app_resident_code_offset;
+ addr_data = base + pdesc->app_resident_data_offset;
+
+ desc->ctx_dma = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->non_sec_code_off = pdesc->app_resident_code_offset;
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+}
+
+static void
+acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
+ u64 offset)
+{
+ struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc;
+
+ bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
+ bl_desc->code_dma_base = u64_to_flcn64(offset);
+ bl_desc->non_sec_code_off = hdr->non_sec_code_off;
+ bl_desc->non_sec_code_size = hdr->non_sec_code_size;
+ bl_desc->sec_code_off = hdr->app[0].sec_code_off;
+ bl_desc->sec_code_size = hdr->app[0].sec_code_size;
+ bl_desc->code_entry_point = 0;
+ bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
+ bl_desc->data_size = hdr->data_size;
+}
+
+const struct acr_r352_ls_func
+acr_r361_ls_fecs_func = {
+ .load = acr_ls_ucode_load_fecs,
+ .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+};
+
+const struct acr_r352_ls_func
+acr_r361_ls_gpccs_func = {
+ .load = acr_ls_ucode_load_gpccs,
+ .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+};
+
+const struct acr_r352_func
+acr_r361_func = {
+ .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+ .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
+ .ls_fill_headers = acr_r352_ls_fill_headers,
+ .ls_write_wpr = acr_r352_ls_write_wpr,
+ .ls_func = {
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
+ },
+};
+
+struct nvkm_acr *
+acr_r361_new(unsigned long managed_falcons)
+{
+ return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU,
+ managed_falcons);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 314be2192b7d..27c9dfffb9a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -19,184 +19,108 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+
+/*
+ * Secure boot is the process by which NVIDIA-signed firmware is loaded into
+ * some of the falcons of a GPU. For production devices this is the only way
+ * for the firmware to access useful (but sensitive) registers.
+ *
+ * A Falcon microprocessor supporting advanced security modes can run in one of
+ * three modes:
+ *
+ * - Non-secure (NS). In this mode, functionality is similar to Falcon
+ * architectures before security modes were introduced (pre-Maxwell), but
+ * capability is restricted. In particular, certain registers may be
+ * inaccessible for reads and/or writes, and physical memory access may be
+ * disabled (on certain Falcon instances). This is the only possible mode that
+ * can be used if you don't have microcode cryptographically signed by NVIDIA.
+ *
+ * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
+ * not possible to read or write any Falcon internal state or Falcon registers
+ * from outside the Falcon (for example, from the host system). The only way
+ * to enable this mode is by loading microcode that has been signed by NVIDIA.
+ * (The loading process involves tagging the IMEM block as secure, writing the
+ * signature into a Falcon register, and starting execution. The hardware will
+ * validate the signature, and if valid, grant HS privileges.)
+ *
+ * - Light Secure (LS). In this mode, the microprocessor has more privileges
+ * than NS but fewer than HS. Some of the microprocessor state is visible to
+ * host software to ease debugging. The only way to enable this mode is by HS
+ * microcode enabling LS mode. Some privileges available to HS mode are not
+ * available here. LS mode is introduced in GM20x.
+ *
+ * Secure boot consists of temporarily switching an HS-capable falcon (typically
+ * PMU) into HS mode in order to validate the LS firmwares of managed falcons,
+ * load them, and switch managed falcons into LS mode. Once secure boot
+ * completes, no falcon remains in HS mode.
+ *
+ * Secure boot requires a write-protected memory region (WPR) which can only be
+ * written by the secure falcon. On dGPU, the driver sets up the WPR region in
+ * video memory. On Tegra, it is set up by the bootloader and its location and
+ * size written into memory controller registers.
+ *
+ * The secure boot process takes place as follows:
+ *
+ * 1) An LS blob is constructed that contains all the LS firmwares we want to
+ * load, along with their signatures and bootloaders.
+ *
+ * 2) An HS blob (also called ACR) is created that contains the signed HS
+ * firmware in charge of loading the LS firmwares into their respective
+ * falcons.
+ *
+ * 3) The HS blob is loaded (via its own bootloader) and executed on the
+ * HS-capable falcon. It authenticates itself, switches the secure falcon to
+ * HS mode and setup the WPR region around the LS blob (dGPU) or copies the
+ * LS blob into the WPR region (Tegra).
+ *
+ * 4) The LS blob is now secure from all external tampering. The HS falcon
+ * checks the signatures of the LS firmwares and, if valid, switches the
+ * managed falcons to LS mode and makes them ready to run the LS firmware.
+ *
+ * 5) The managed falcons remain in LS mode and can be started.
+ *
+ */
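Expressed as driver pseudocode, the five steps map onto a sequence like the following (a sketch with hypothetical helper names; the real work is split between the ACR implementation and the chip-specific secboot code):

/* Sketch of the secure boot sequence, one hypothetical helper per step */
static int
secboot_flow_sketch(struct nvkm_secboot *sb)
{
	int ret;

	ret = build_ls_blob(sb);	/* 1: LS FWs, signatures, bootloaders */
	if (ret)
		return ret;
	ret = build_hs_blob(sb);	/* 2: signed ACR (HS) firmware */
	if (ret)
		return ret;
	ret = run_hs_blob(sb);		/* 3+4: set up WPR, validate LS FWs */
	if (ret)
		return ret;
	return start_ls_falcons(sb);	/* 5: managed falcons run in LS mode */
}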
+
#include "priv.h"
+#include "acr.h"
#include <subdev/mc.h>
#include <subdev/timer.h>
+#include <subdev/pmu.h>
-static const char *
-managed_falcons_names[] = {
+const char *
+nvkm_secboot_falcon_name[] = {
[NVKM_SECBOOT_FALCON_PMU] = "PMU",
[NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
[NVKM_SECBOOT_FALCON_FECS] = "FECS",
[NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
[NVKM_SECBOOT_FALCON_END] = "<invalid>",
};
-
-/*
- * Helper falcon functions
- */
-
-static int
-falcon_clear_halt_interrupt(struct nvkm_device *device, u32 base)
-{
- int ret;
-
- /* clear halt interrupt */
- nvkm_mask(device, base + 0x004, 0x10, 0x10);
- /* wait until halt interrupt is cleared */
- ret = nvkm_wait_msec(device, 10, base + 0x008, 0x10, 0x0);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int
-falcon_wait_idle(struct nvkm_device *device, u32 base)
-{
- int ret;
-
- ret = nvkm_wait_msec(device, 10, base + 0x04c, 0xffff, 0x0);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int
-nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
-{
- struct nvkm_device *device = sb->subdev.device;
- int ret;
-
- /* enable engine */
- nvkm_mc_enable(device, sb->devidx);
- ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
- if (ret < 0) {
- nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
- nvkm_mc_disable(device, sb->devidx);
- return ret;
- }
-
- ret = falcon_wait_idle(device, sb->base);
- if (ret)
- return ret;
-
- /* enable IRQs */
- nvkm_wr32(device, sb->base + 0x010, 0xff);
- nvkm_mc_intr_mask(device, sb->devidx, true);
-
- return 0;
-}
-
-static int
-nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
-{
- struct nvkm_device *device = sb->subdev.device;
-
- /* disable IRQs and wait for any previous code to complete */
- nvkm_mc_intr_mask(device, sb->devidx, false);
- nvkm_wr32(device, sb->base + 0x014, 0xff);
-
- falcon_wait_idle(device, sb->base);
-
- /* disable engine */
- nvkm_mc_disable(device, sb->devidx);
-
- return 0;
-}
-
-int
-nvkm_secboot_falcon_reset(struct nvkm_secboot *sb)
-{
- int ret;
-
- ret = nvkm_secboot_falcon_disable(sb);
- if (ret)
- return ret;
-
- ret = nvkm_secboot_falcon_enable(sb);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/**
- * nvkm_secboot_falcon_run - run the falcon that will perform secure boot
- *
- * This function is to be called after all chip-specific preparations have
- * been completed. It will start the falcon to perform secure boot, wait for
- * it to halt, and report if an error occurred.
- */
-int
-nvkm_secboot_falcon_run(struct nvkm_secboot *sb)
-{
- struct nvkm_device *device = sb->subdev.device;
- int ret;
-
- /* Start falcon */
- nvkm_wr32(device, sb->base + 0x100, 0x2);
-
- /* Wait for falcon halt */
- ret = nvkm_wait_msec(device, 100, sb->base + 0x100, 0x10, 0x10);
- if (ret < 0)
- return ret;
-
- /* If mailbox register contains an error code, then ACR has failed */
- ret = nvkm_rd32(device, sb->base + 0x040);
- if (ret) {
- nvkm_error(&sb->subdev, "ACR boot failed, ret 0x%08x", ret);
- falcon_clear_halt_interrupt(device, sb->base);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
/**
* nvkm_secboot_reset() - reset specified falcon
*/
int
-nvkm_secboot_reset(struct nvkm_secboot *sb, u32 falcon)
+nvkm_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
{
/* Unmanaged falcon? */
- if (!(BIT(falcon) & sb->func->managed_falcons)) {
+ if (!(BIT(falcon) & sb->acr->managed_falcons)) {
nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
return -EINVAL;
}
- return sb->func->reset(sb, falcon);
-}
-
-/**
- * nvkm_secboot_start() - start specified falcon
- */
-int
-nvkm_secboot_start(struct nvkm_secboot *sb, u32 falcon)
-{
- /* Unmanaged falcon? */
- if (!(BIT(falcon) & sb->func->managed_falcons)) {
- nvkm_error(&sb->subdev, "cannot start unmanaged falcon!\n");
- return -EINVAL;
- }
-
- return sb->func->start(sb, falcon);
+ return sb->acr->func->reset(sb->acr, sb, falcon);
}
/**
* nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
*/
bool
-nvkm_secboot_is_managed(struct nvkm_secboot *secboot,
- enum nvkm_secboot_falcon fid)
+nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid)
{
- if (!secboot)
+ if (!sb)
return false;
- return secboot->func->managed_falcons & BIT(fid);
+ return sb->acr->managed_falcons & BIT(fid);
}
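Engine code is expected to use this check as a guard before going through secboot; an illustrative caller (gr_reset_fecs is hypothetical, the two nvkm_secboot_* calls are the APIs shown above):

/* Sketch: reset FECS through secboot only when it is securely managed */
static int
gr_reset_fecs(struct nvkm_secboot *sb)
{
	if (!nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
		return 0; /* falcon not secure-boot managed, nothing to do */
	return nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
}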
static int
@@ -205,9 +129,19 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
struct nvkm_secboot *sb = nvkm_secboot(subdev);
int ret = 0;
+ switch (sb->acr->boot_falcon) {
+ case NVKM_SECBOOT_FALCON_PMU:
+ sb->boot_falcon = subdev->device->pmu->falcon;
+ break;
+ default:
+ nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
+ nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
+ return -EINVAL;
+ }
+
/* Call chip-specific init function */
- if (sb->func->init)
- ret = sb->func->init(sb);
+ if (sb->func->oneinit)
+ ret = sb->func->oneinit(sb);
if (ret) {
nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
ret);
@@ -249,7 +183,7 @@ nvkm_secboot = {
};
int
-nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
+nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr,
struct nvkm_device *device, int index,
struct nvkm_secboot *sb)
{
@@ -257,22 +191,14 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
sb->func = func;
-
- /* setup the performing falcon's base address and masks */
- switch (func->boot_falcon) {
- case NVKM_SECBOOT_FALCON_PMU:
- sb->devidx = NVKM_SUBDEV_PMU;
- sb->base = 0x10a000;
- break;
- default:
- nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
- return -EINVAL;
- };
+ sb->acr = acr;
+ acr->subdev = &sb->subdev;
nvkm_debug(&sb->subdev, "securely managed falcons:\n");
- for_each_set_bit(fid, &sb->func->managed_falcons,
+ for_each_set_bit(fid, &sb->acr->managed_falcons,
NVKM_SECBOOT_FALCON_END)
- nvkm_debug(&sb->subdev, "- %s\n", managed_falcons_names[fid]);
+ nvkm_debug(&sb->subdev, "- %s\n",
+ nvkm_secboot_falcon_name[fid]);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index ec48e4ace37a..813c4eb0b25f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -20,1313 +20,84 @@
* DEALINGS IN THE SOFTWARE.
*/
-/*
- * Secure boot is the process by which NVIDIA-signed firmware is loaded into
- * some of the falcons of a GPU. For production devices this is the only way
- * for the firmware to access useful (but sensitive) registers.
- *
- * A Falcon microprocessor supporting advanced security modes can run in one of
- * three modes:
- *
- * - Non-secure (NS). In this mode, functionality is similar to Falcon
- * architectures before security modes were introduced (pre-Maxwell), but
- * capability is restricted. In particular, certain registers may be
- * inaccessible for reads and/or writes, and physical memory access may be
- * disabled (on certain Falcon instances). This is the only possible mode that
- * can be used if you don't have microcode cryptographically signed by NVIDIA.
- *
- * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
- * not possible to read or write any Falcon internal state or Falcon registers
- * from outside the Falcon (for example, from the host system). The only way
- * to enable this mode is by loading microcode that has been signed by NVIDIA.
- * (The loading process involves tagging the IMEM block as secure, writing the
- * signature into a Falcon register, and starting execution. The hardware will
- * validate the signature, and if valid, grant HS privileges.)
- *
- * - Light Secure (LS). In this mode, the microprocessor has more privileges
- * than NS but fewer than HS. Some of the microprocessor state is visible to
- * host software to ease debugging. The only way to enable this mode is by HS
- * microcode enabling LS mode. Some privileges available to HS mode are not
- * available here. LS mode is introduced in GM20x.
- *
- * Secure boot consists in temporarily switching a HS-capable falcon (typically
- * PMU) into HS mode in order to validate the LS firmwares of managed falcons,
- * load them, and switch managed falcons into LS mode. Once secure boot
- * completes, no falcon remains in HS mode.
- *
- * Secure boot requires a write-protected memory region (WPR) which can only be
- * written by the secure falcon. On dGPU, the driver sets up the WPR region in
- * video memory. On Tegra, it is set up by the bootloader and its location and
- * size written into memory controller registers.
- *
- * The secure boot process takes place as follows:
- *
- * 1) A LS blob is constructed that contains all the LS firmwares we want to
- * load, along with their signatures and bootloaders.
- *
- * 2) A HS blob (also called ACR) is created that contains the signed HS
- * firmware in charge of loading the LS firmwares into their respective
- * falcons.
- *
- * 3) The HS blob is loaded (via its own bootloader) and executed on the
- * HS-capable falcon. It authenticates itself, switches the secure falcon to
- * HS mode and setup the WPR region around the LS blob (dGPU) or copies the
- * LS blob into the WPR region (Tegra).
- *
- * 4) The LS blob is now secure from all external tampering. The HS falcon
- * checks the signatures of the LS firmwares and, if valid, switches the
- * managed falcons to LS mode and makes them ready to run the LS firmware.
- *
- * 5) The managed falcons remain in LS mode and can be started.
- *
- */
-#include "priv.h"
+#include "acr.h"
+#include "gm200.h"
#include <core/gpuobj.h>
-#include <core/firmware.h>
#include <subdev/fb.h>
-
-enum {
- FALCON_DMAIDX_UCODE = 0,
- FALCON_DMAIDX_VIRT = 1,
- FALCON_DMAIDX_PHYS_VID = 2,
- FALCON_DMAIDX_PHYS_SYS_COH = 3,
- FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
-};
-
-/**
- * struct fw_bin_header - header of firmware files
- * @bin_magic: always 0x3b1d14f0
- * @bin_ver: version of the bin format
- * @bin_size: entire image size including this header
- * @header_offset: offset of the firmware/bootloader header in the file
- * @data_offset: offset of the firmware/bootloader payload in the file
- * @data_size: size of the payload
- *
- * This header is located at the beginning of the HS firmware and HS bootloader
- * files, to describe where the headers and data can be found.
- */
-struct fw_bin_header {
- u32 bin_magic;
- u32 bin_ver;
- u32 bin_size;
- u32 header_offset;
- u32 data_offset;
- u32 data_size;
-};
-
-/**
- * struct fw_bl_desc - firmware bootloader descriptor
- * @start_tag: starting tag of bootloader
- * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc
- * @code_off: offset of code section
- * @code_size: size of code section
- * @data_off: offset of data section
- * @data_size: size of data section
- *
- * This structure is embedded in bootloader firmware files at to describe the
- * IMEM and DMEM layout expected by the bootloader.
- */
-struct fw_bl_desc {
- u32 start_tag;
- u32 dmem_load_off;
- u32 code_off;
- u32 code_size;
- u32 data_off;
- u32 data_size;
-};
-
-
-/*
- *
- * LS blob structures
- *
- */
-
-/**
- * struct lsf_ucode_desc - LS falcon signatures
- * @prd_keys: signature to use when the GPU is in production mode
- * @dgb_keys: signature to use when the GPU is in debug mode
- * @b_prd_present: whether the production key is present
- * @b_dgb_present: whether the debug key is present
- * @falcon_id: ID of the falcon the ucode applies to
- *
- * Directly loaded from a signature file.
- */
-struct lsf_ucode_desc {
- u8 prd_keys[2][16];
- u8 dbg_keys[2][16];
- u32 b_prd_present;
- u32 b_dbg_present;
- u32 falcon_id;
-};
-
-/**
- * struct lsf_lsb_header - LS firmware header
- * @signature: signature to verify the firmware against
- * @ucode_off: offset of the ucode blob in the WPR region. The ucode
- * blob contains the bootloader, code and data of the
- * LS falcon
- * @ucode_size: size of the ucode blob, including bootloader
- * @data_size: size of the ucode blob data
- * @bl_code_size: size of the bootloader code
- * @bl_imem_off: offset in imem of the bootloader
- * @bl_data_off: offset of the bootloader data in WPR region
- * @bl_data_size: size of the bootloader data
- * @app_code_off: offset of the app code relative to ucode_off
- * @app_code_size: size of the app code
- * @app_data_off: offset of the app data relative to ucode_off
- * @app_data_size: size of the app data
- * @flags: flags for the secure bootloader
- *
- * This structure is written into the WPR region for each managed falcon. Each
- * instance is referenced by the lsb_offset member of the corresponding
- * lsf_wpr_header.
- */
-struct lsf_lsb_header {
- struct lsf_ucode_desc signature;
- u32 ucode_off;
- u32 ucode_size;
- u32 data_size;
- u32 bl_code_size;
- u32 bl_imem_off;
- u32 bl_data_off;
- u32 bl_data_size;
- u32 app_code_off;
- u32 app_code_size;
- u32 app_data_off;
- u32 app_data_size;
- u32 flags;
-#define LSF_FLAG_LOAD_CODE_AT_0 1
-#define LSF_FLAG_DMACTL_REQ_CTX 4
-#define LSF_FLAG_FORCE_PRIV_LOAD 8
-};
-
-/**
- * struct lsf_wpr_header - LS blob WPR Header
- * @falcon_id: LS falcon ID
- * @lsb_offset: offset of the lsb_lsf_header in the WPR region
- * @bootstrap_owner: secure falcon reponsible for bootstrapping the LS falcon
- * @lazy_bootstrap: skip bootstrapping by ACR
- * @status: bootstrapping status
- *
- * An array of these is written at the beginning of the WPR region, one for
- * each managed falcon. The array is terminated by an instance which falcon_id
- * is LSF_FALCON_ID_INVALID.
- */
-struct lsf_wpr_header {
- u32 falcon_id;
- u32 lsb_offset;
- u32 bootstrap_owner;
- u32 lazy_bootstrap;
- u32 status;
-#define LSF_IMAGE_STATUS_NONE 0
-#define LSF_IMAGE_STATUS_COPY 1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
-};
-
-
-/**
- * struct ls_ucode_img_desc - descriptor of firmware image
- * @descriptor_size: size of this descriptor
- * @image_size: size of the whole image
- * @bootloader_start_offset: start offset of the bootloader in ucode image
- * @bootloader_size: size of the bootloader
- * @bootloader_imem_offset: start off set of the bootloader in IMEM
- * @bootloader_entry_point: entry point of the bootloader in IMEM
- * @app_start_offset: start offset of the LS firmware
- * @app_size: size of the LS firmware's code and data
- * @app_imem_offset: offset of the app in IMEM
- * @app_imem_entry: entry point of the app in IMEM
- * @app_dmem_offset: offset of the data in DMEM
- * @app_resident_code_offset: offset of app code from app_start_offset
- * @app_resident_code_size: size of the code
- * @app_resident_data_offset: offset of data from app_start_offset
- * @app_resident_data_size: size of data
- *
- * A firmware image contains the code, data, and bootloader of a given LS
- * falcon in a single blob. This structure describes where everything is.
- *
- * This can be generated from a (bootloader, code, data) set if they have
- * been loaded separately, or come directly from a file.
- */
-struct ls_ucode_img_desc {
- u32 descriptor_size;
- u32 image_size;
- u32 tools_version;
- u32 app_version;
- char date[64];
- u32 bootloader_start_offset;
- u32 bootloader_size;
- u32 bootloader_imem_offset;
- u32 bootloader_entry_point;
- u32 app_start_offset;
- u32 app_size;
- u32 app_imem_offset;
- u32 app_imem_entry;
- u32 app_dmem_offset;
- u32 app_resident_code_offset;
- u32 app_resident_code_size;
- u32 app_resident_data_offset;
- u32 app_resident_data_size;
- u32 nb_overlays;
- struct {u32 start; u32 size; } load_ovl[64];
- u32 compressed;
-};
-
-/**
- * struct ls_ucode_img - temporary storage for loaded LS firmwares
- * @node: to link within lsf_ucode_mgr
- * @falcon_id: ID of the falcon this LS firmware is for
- * @ucode_desc: loaded or generated map of ucode_data
- * @ucode_header: header of the firmware
- * @ucode_data: firmware payload (code and data)
- * @ucode_size: size in bytes of data in ucode_data
- * @wpr_header: WPR header to be written to the LS blob
- * @lsb_header: LSB header to be written to the LS blob
- *
- * Preparing the WPR LS blob requires information about all the LS firmwares
- * (size, etc) to be known. This structure contains all the data of one LS
- * firmware.
- */
-struct ls_ucode_img {
- struct list_head node;
- enum nvkm_secboot_falcon falcon_id;
-
- struct ls_ucode_img_desc ucode_desc;
- u32 *ucode_header;
- u8 *ucode_data;
- u32 ucode_size;
-
- struct lsf_wpr_header wpr_header;
- struct lsf_lsb_header lsb_header;
-};
-
-/**
- * struct ls_ucode_mgr - manager for all LS falcon firmwares
- * @count: number of managed LS falcons
- * @wpr_size: size of the required WPR region in bytes
- * @img_list: linked list of lsf_ucode_img
- */
-struct ls_ucode_mgr {
- u16 count;
- u32 wpr_size;
- struct list_head img_list;
-};
-
-
-/*
- *
- * HS blob structures
- *
- */
-
-/**
- * struct hsf_fw_header - HS firmware descriptor
- * @sig_dbg_offset: offset of the debug signature
- * @sig_dbg_size: size of the debug signature
- * @sig_prod_offset: offset of the production signature
- * @sig_prod_size: size of the production signature
- * @patch_loc: offset of the offset (sic) of where the signature is
- * @patch_sig: offset of the offset (sic) to add to sig_*_offset
- * @hdr_offset: offset of the load header (see struct hs_load_header)
- * @hdr_size: size of above header
- *
- * This structure is embedded in the HS firmware image at
- * hs_bin_hdr.header_offset.
- */
-struct hsf_fw_header {
- u32 sig_dbg_offset;
- u32 sig_dbg_size;
- u32 sig_prod_offset;
- u32 sig_prod_size;
- u32 patch_loc;
- u32 patch_sig;
- u32 hdr_offset;
- u32 hdr_size;
-};
-
-/**
- * struct hsf_load_header - HS firmware load header
- */
-struct hsf_load_header {
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 data_dma_base;
- u32 data_size;
- u32 num_apps;
- struct {
- u32 sec_code_off;
- u32 sec_code_size;
- } app[0];
-};
-
-/**
- * Convenience function to duplicate a firmware file in memory and check that
- * it has the required minimum size.
- */
-static void *
-gm200_secboot_load_firmware(struct nvkm_subdev *subdev, const char *name,
- size_t min_size)
-{
- const struct firmware *fw;
- void *blob;
- int ret;
-
- ret = nvkm_firmware_get(subdev->device, name, &fw);
- if (ret)
- return ERR_PTR(ret);
- if (fw->size < min_size) {
- nvkm_error(subdev, "%s is smaller than expected size %zu\n",
- name, min_size);
- nvkm_firmware_put(fw);
- return ERR_PTR(-EINVAL);
- }
- blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
- nvkm_firmware_put(fw);
- if (!blob)
- return ERR_PTR(-ENOMEM);
-
- return blob;
-}
-
-
-/*
- * Low-secure blob creation
- */
-
-#define BL_DESC_BLK_SIZE 256
-/**
- * Build a ucode image and descriptor from provided bootloader, code and data.
- *
- * @bl: bootloader image, including 16-bytes descriptor
- * @code: LS firmware code segment
- * @data: LS firmware data segment
- * @desc: ucode descriptor to be written
- *
- * Return: allocated ucode image with corresponding descriptor information. desc
- * is also updated to contain the right offsets within returned image.
- */
-static void *
-ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
- const struct firmware *data, struct ls_ucode_img_desc *desc)
-{
- struct fw_bin_header *bin_hdr = (void *)bl->data;
- struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
- void *bl_data = (void *)bl->data + bin_hdr->data_offset;
- u32 pos = 0;
- void *image;
-
- desc->bootloader_start_offset = pos;
- desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
- desc->bootloader_imem_offset = bl_desc->start_tag * 256;
- desc->bootloader_entry_point = bl_desc->start_tag * 256;
-
- pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
- desc->app_start_offset = pos;
- desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
- ALIGN(data->size, BL_DESC_BLK_SIZE);
- desc->app_imem_offset = 0;
- desc->app_imem_entry = 0;
- desc->app_dmem_offset = 0;
- desc->app_resident_code_offset = 0;
- desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
-
- pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
- desc->app_resident_data_offset = pos - desc->app_start_offset;
- desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
-
- desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
- desc->app_size;
-
- image = kzalloc(desc->image_size, GFP_KERNEL);
- if (!image)
- return ERR_PTR(-ENOMEM);
-
- memcpy(image + desc->bootloader_start_offset, bl_data,
- bl_desc->code_size);
- memcpy(image + desc->app_start_offset, code->data, code->size);
- memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
- data->data, data->size);
-
- return image;
-}
-
-/**
- * ls_ucode_img_load_generic() - load and prepare a LS ucode image
- *
- * Load the LS microcode, bootloader and signature and pack them into a single
- * blob. Also generate the corresponding ucode descriptor.
- */
-static int
-ls_ucode_img_load_generic(struct nvkm_subdev *subdev,
- struct ls_ucode_img *img, const char *falcon_name,
- const u32 falcon_id)
-{
- const struct firmware *bl, *code, *data;
- struct lsf_ucode_desc *lsf_desc;
- char f[64];
- int ret;
-
- img->ucode_header = NULL;
-
- snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &bl);
- if (ret)
- goto error;
-
- snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &code);
- if (ret)
- goto free_bl;
-
- snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &data);
- if (ret)
- goto free_inst;
-
- img->ucode_data = ls_ucode_img_build(bl, code, data,
- &img->ucode_desc);
- if (IS_ERR(img->ucode_data)) {
- ret = PTR_ERR(img->ucode_data);
- goto free_data;
- }
- img->ucode_size = img->ucode_desc.image_size;
-
- snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
- lsf_desc = gm200_secboot_load_firmware(subdev, f, sizeof(*lsf_desc));
- if (IS_ERR(lsf_desc)) {
- ret = PTR_ERR(lsf_desc);
- goto free_image;
- }
- /* not needed? the signature should already have the right value */
- lsf_desc->falcon_id = falcon_id;
- memcpy(&img->lsb_header.signature, lsf_desc, sizeof(*lsf_desc));
- img->falcon_id = lsf_desc->falcon_id;
- kfree(lsf_desc);
-
- /* success path - only free requested firmware files */
- goto free_data;
-
-free_image:
- kfree(img->ucode_data);
-free_data:
- nvkm_firmware_put(data);
-free_inst:
- nvkm_firmware_put(code);
-free_bl:
- nvkm_firmware_put(bl);
-error:
- return ret;
-}
-
-typedef int (*lsf_load_func)(struct nvkm_subdev *, struct ls_ucode_img *);
-
-static int
-ls_ucode_img_load_fecs(struct nvkm_subdev *subdev, struct ls_ucode_img *img)
-{
- return ls_ucode_img_load_generic(subdev, img, "fecs",
- NVKM_SECBOOT_FALCON_FECS);
-}
-
-static int
-ls_ucode_img_load_gpccs(struct nvkm_subdev *subdev, struct ls_ucode_img *img)
-{
- return ls_ucode_img_load_generic(subdev, img, "gpccs",
- NVKM_SECBOOT_FALCON_GPCCS);
-}
-
-/**
- * ls_ucode_img_load() - create a lsf_ucode_img and load it
- */
-static struct ls_ucode_img *
-ls_ucode_img_load(struct nvkm_subdev *subdev, lsf_load_func load_func)
-{
- struct ls_ucode_img *img;
- int ret;
-
- img = kzalloc(sizeof(*img), GFP_KERNEL);
- if (!img)
- return ERR_PTR(-ENOMEM);
-
- ret = load_func(subdev, img);
- if (ret) {
- kfree(img);
- return ERR_PTR(ret);
- }
-
- return img;
-}
-
-static const lsf_load_func lsf_load_funcs[] = {
- [NVKM_SECBOOT_FALCON_END] = NULL, /* reserve enough space */
- [NVKM_SECBOOT_FALCON_FECS] = ls_ucode_img_load_fecs,
- [NVKM_SECBOOT_FALCON_GPCCS] = ls_ucode_img_load_gpccs,
-};
-
-/**
- * ls_ucode_img_populate_bl_desc() - populate a DMEM BL descriptor for LS image
- * @img: ucode image to generate against
- * @desc: descriptor to populate
- * @sb: secure boot state to use for base addresses
- *
- * Populate the DMEM BL descriptor with the information contained in a
- * ls_ucode_desc.
- *
- */
-static void
-ls_ucode_img_populate_bl_desc(struct ls_ucode_img *img, u64 wpr_addr,
- struct gm200_flcn_bl_desc *desc)
-{
- struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 addr_base;
-
- addr_base = wpr_addr + img->lsb_header.ucode_off +
- pdesc->app_start_offset;
-
- memset(desc, 0, sizeof(*desc));
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base.lo = lower_32_bits(
- (addr_base + pdesc->app_resident_code_offset));
- desc->code_dma_base.hi = upper_32_bits(
- (addr_base + pdesc->app_resident_code_offset));
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->data_dma_base.lo = lower_32_bits(
- (addr_base + pdesc->app_resident_data_offset));
- desc->data_dma_base.hi = upper_32_bits(
- (addr_base + pdesc->app_resident_data_offset));
- desc->data_size = pdesc->app_resident_data_size;
- desc->code_entry_point = pdesc->app_imem_entry;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-/**
- * ls_ucode_img_fill_headers - fill the WPR and LSB headers of an image
- * @gsb: secure boot device used
- * @img: image to generate for
- * @offset: offset in the WPR region where this image starts
- *
- * Allocate space in the WPR area from offset and write the WPR and LSB headers
- * accordingly.
- *
- * Return: offset at the end of this image.
- */
-static u32
-ls_ucode_img_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_img *img,
- u32 offset)
-{
- struct lsf_wpr_header *whdr = &img->wpr_header;
- struct lsf_lsb_header *lhdr = &img->lsb_header;
- struct ls_ucode_img_desc *desc = &img->ucode_desc;
-
- if (img->ucode_header) {
- nvkm_fatal(&gsb->base.subdev,
- "images withough loader are not supported yet!\n");
- return offset;
- }
-
- /* Fill WPR header */
- whdr->falcon_id = img->falcon_id;
- whdr->bootstrap_owner = gsb->base.func->boot_falcon;
- whdr->status = LSF_IMAGE_STATUS_COPY;
-
- /* Align, save off, and include an LSB header size */
- offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
- whdr->lsb_offset = offset;
- offset += sizeof(struct lsf_lsb_header);
-
- /*
- * Align, save off, and include the original (static) ucode
- * image size
- */
- offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- lhdr->ucode_off = offset;
- offset += img->ucode_size;
-
- /*
- * For falcons that use a boot loader (BL), we append a loader
- * desc structure on the end of the ucode image and consider
- * this the boot loader data. The host will then copy the loader
- * desc args to this space within the WPR region (before locking
- * down) and the HS bin will then copy them to DMEM 0 for the
- * loader.
- */
- lhdr->bl_code_size = ALIGN(desc->bootloader_size,
- LSF_BL_CODE_SIZE_ALIGN);
- lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
- LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
- lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
- lhdr->bl_code_size - lhdr->ucode_size;
- /*
- * Though the BL is located at 0th offset of the image, the VA
- * is different to make sure that it doesn't collide the actual
- * OS VA range
- */
- lhdr->bl_imem_off = desc->bootloader_imem_offset;
- lhdr->app_code_off = desc->app_start_offset +
- desc->app_resident_code_offset;
- lhdr->app_code_size = desc->app_resident_code_size;
- lhdr->app_data_off = desc->app_start_offset +
- desc->app_resident_data_offset;
- lhdr->app_data_size = desc->app_resident_data_size;
-
- lhdr->flags = 0;
- if (img->falcon_id == gsb->base.func->boot_falcon)
- lhdr->flags = LSF_FLAG_DMACTL_REQ_CTX;
-
- /* GPCCS will be loaded using PRI */
- if (img->falcon_id == NVKM_SECBOOT_FALCON_GPCCS)
- lhdr->flags |= LSF_FLAG_FORCE_PRIV_LOAD;
-
- /* Align (size bloat) and save off BL descriptor size */
- lhdr->bl_data_size = ALIGN(sizeof(struct gm200_flcn_bl_desc),
- LSF_BL_DATA_SIZE_ALIGN);
- /*
- * Align, save off, and include the additional BL data
- */
- offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
- lhdr->bl_data_off = offset;
- offset += lhdr->bl_data_size;
-
- return offset;
-}
-
-static void
-ls_ucode_mgr_init(struct ls_ucode_mgr *mgr)
-{
- memset(mgr, 0, sizeof(*mgr));
- INIT_LIST_HEAD(&mgr->img_list);
-}
-
-static void
-ls_ucode_mgr_cleanup(struct ls_ucode_mgr *mgr)
-{
- struct ls_ucode_img *img, *t;
-
- list_for_each_entry_safe(img, t, &mgr->img_list, node) {
- kfree(img->ucode_data);
- kfree(img->ucode_header);
- kfree(img);
- }
-}
-
-static void
-ls_ucode_mgr_add_img(struct ls_ucode_mgr *mgr, struct ls_ucode_img *img)
-{
- mgr->count++;
- list_add_tail(&img->node, &mgr->img_list);
-}
-
-/**
- * ls_ucode_mgr_fill_headers - fill WPR and LSB headers of all managed images
- */
-static void
-ls_ucode_mgr_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr)
-{
- struct ls_ucode_img *img;
- u32 offset;
-
- /*
- * Start with an array of WPR headers at the base of the WPR.
- * The expectation here is that the secure falcon will do a single DMA
- * read of this array and cache it internally so it's ok to pack these.
- * Also, we add 1 to the falcon count to indicate the end of the array.
- */
- offset = sizeof(struct lsf_wpr_header) * (mgr->count + 1);
-
- /*
- * Walk the managed falcons, accounting for the LSB structs
- * as well as the ucode images.
- */
- list_for_each_entry(img, &mgr->img_list, node) {
- offset = ls_ucode_img_fill_headers(gsb, img, offset);
- }
-
- mgr->wpr_size = offset;
-}
-
-/**
- * ls_ucode_mgr_write_wpr - write the WPR blob contents
- */
-static int
-ls_ucode_mgr_write_wpr(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr,
- struct nvkm_gpuobj *wpr_blob)
-{
- struct ls_ucode_img *img;
- u32 pos = 0;
-
- nvkm_kmap(wpr_blob);
-
- list_for_each_entry(img, &mgr->img_list, node) {
- nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
- sizeof(img->wpr_header));
-
- nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
- &img->lsb_header, sizeof(img->lsb_header));
-
- /* Generate and write BL descriptor */
- if (!img->ucode_header) {
- u8 desc[gsb->func->bl_desc_size];
- struct gm200_flcn_bl_desc gdesc;
-
- ls_ucode_img_populate_bl_desc(img, gsb->wpr_addr,
- &gdesc);
- gsb->func->fixup_bl_desc(&gdesc, &desc);
- nvkm_gpuobj_memcpy_to(wpr_blob,
- img->lsb_header.bl_data_off,
- &desc, gsb->func->bl_desc_size);
- }
-
- /* Copy ucode */
- nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
- img->ucode_data, img->ucode_size);
-
- pos += sizeof(img->wpr_header);
- }
-
- nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
-
- nvkm_done(wpr_blob);
-
- return 0;
-}
-
-/* Both size and address of WPR need to be 128K-aligned */
-#define WPR_ALIGNMENT 0x20000
-/**
- * gm200_secboot_prepare_ls_blob() - prepare the LS blob
- *
- * For each securely managed falcon, load the FW, signatures and bootloaders and
- * prepare a ucode blob. Then, compute the offsets in the WPR region for each
- * blob, and finally write the headers and ucode blobs into a GPU object that
- * will be copied into the WPR region by the HS firmware.
- */
-static int
-gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
-{
- struct nvkm_secboot *sb = &gsb->base;
- struct nvkm_device *device = sb->subdev.device;
- struct ls_ucode_mgr mgr;
- int falcon_id;
- int ret;
-
- ls_ucode_mgr_init(&mgr);
-
- /* Load all LS blobs */
- for_each_set_bit(falcon_id, &gsb->base.func->managed_falcons,
- NVKM_SECBOOT_FALCON_END) {
- struct ls_ucode_img *img;
-
- img = ls_ucode_img_load(&sb->subdev, lsf_load_funcs[falcon_id]);
-
- if (IS_ERR(img)) {
- ret = PTR_ERR(img);
- goto cleanup;
- }
- ls_ucode_mgr_add_img(&mgr, img);
- }
-
- /*
- * Fill the WPR and LSF headers with the right offsets and compute
- * required WPR size
- */
- ls_ucode_mgr_fill_headers(gsb, &mgr);
- mgr.wpr_size = ALIGN(mgr.wpr_size, WPR_ALIGNMENT);
-
- /* Allocate GPU object that will contain the WPR region */
- ret = nvkm_gpuobj_new(device, mgr.wpr_size, WPR_ALIGNMENT, false, NULL,
- &gsb->ls_blob);
- if (ret)
- goto cleanup;
-
- nvkm_debug(&sb->subdev, "%d managed LS falcons, WPR size is %d bytes\n",
- mgr.count, mgr.wpr_size);
-
- /* If WPR address and size are not fixed, set them to fit the LS blob */
- if (!gsb->wpr_size) {
- gsb->wpr_addr = gsb->ls_blob->addr;
- gsb->wpr_size = gsb->ls_blob->size;
- }
-
- /* Write LS blob */
- ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
- if (ret)
- nvkm_gpuobj_del(&gsb->ls_blob);
-
-cleanup:
- ls_ucode_mgr_cleanup(&mgr);
-
- return ret;
-}
-
-/*
- * High-secure blob creation
- */
-
-/**
- * gm200_secboot_hsf_patch_signature() - patch HS blob with correct signature
- */
-static void
-gm200_secboot_hsf_patch_signature(struct gm200_secboot *gsb, void *acr_image)
-{
- struct nvkm_secboot *sb = &gsb->base;
- struct fw_bin_header *hsbin_hdr = acr_image;
- struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
- void *hs_data = acr_image + hsbin_hdr->data_offset;
- void *sig;
- u32 sig_size;
-
- /* Falcon in debug or production mode? */
- if ((nvkm_rd32(sb->subdev.device, sb->base + 0xc08) >> 20) & 0x1) {
- sig = acr_image + fw_hdr->sig_dbg_offset;
- sig_size = fw_hdr->sig_dbg_size;
- } else {
- sig = acr_image + fw_hdr->sig_prod_offset;
- sig_size = fw_hdr->sig_prod_size;
- }
-
- /* Patch signature */
- memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
-}
-
-/**
- * gm200_secboot_populate_hsf_bl_desc() - populate BL descriptor for HS image
- */
-static void
-gm200_secboot_populate_hsf_bl_desc(void *acr_image,
- struct gm200_flcn_bl_desc *bl_desc)
-{
- struct fw_bin_header *hsbin_hdr = acr_image;
- struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
- struct hsf_load_header *load_hdr = acr_image + fw_hdr->hdr_offset;
-
- /*
- * Descriptor for the bootloader that will load the ACR image into
- * IMEM/DMEM memory.
- */
- fw_hdr = acr_image + hsbin_hdr->header_offset;
- load_hdr = acr_image + fw_hdr->hdr_offset;
- memset(bl_desc, 0, sizeof(*bl_desc));
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->non_sec_code_off = load_hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = load_hdr->non_sec_code_size;
- bl_desc->sec_code_off = load_hdr->app[0].sec_code_off;
- bl_desc->sec_code_size = load_hdr->app[0].sec_code_size;
- bl_desc->code_entry_point = 0;
- /*
- * We need to set code_dma_base to the virtual address of the acr_blob,
- * and add this address to data_dma_base before writing it into DMEM
- */
- bl_desc->code_dma_base.lo = 0;
- bl_desc->data_dma_base.lo = load_hdr->data_dma_base;
- bl_desc->data_size = load_hdr->data_size;
-}
-
-/**
- * gm200_secboot_prepare_hs_blob - load and prepare a HS blob and BL descriptor
- *
- * @gsb secure boot instance to prepare for
- * @fw name of the HS firmware to load
- * @blob pointer to gpuobj that will be allocated to receive the HS FW payload
- * @bl_desc pointer to the BL descriptor to write for this firmware
- * @patch whether we should patch the HS descriptor (only for HS loaders)
- */
-static int
-gm200_secboot_prepare_hs_blob(struct gm200_secboot *gsb, const char *fw,
- struct nvkm_gpuobj **blob,
- struct gm200_flcn_bl_desc *bl_desc, bool patch)
-{
- struct nvkm_subdev *subdev = &gsb->base.subdev;
- void *acr_image;
- struct fw_bin_header *hsbin_hdr;
- struct hsf_fw_header *fw_hdr;
- void *acr_data;
- struct hsf_load_header *load_hdr;
- struct hsflcn_acr_desc *desc;
- int ret;
-
- acr_image = gm200_secboot_load_firmware(subdev, fw, 0);
- if (IS_ERR(acr_image))
- return PTR_ERR(acr_image);
- hsbin_hdr = acr_image;
-
- /* Patch signature */
- gm200_secboot_hsf_patch_signature(gsb, acr_image);
-
- acr_data = acr_image + hsbin_hdr->data_offset;
-
- /* Patch descriptor? */
- if (patch) {
- fw_hdr = acr_image + hsbin_hdr->header_offset;
- load_hdr = acr_image + fw_hdr->hdr_offset;
- desc = acr_data + load_hdr->data_dma_base;
- gsb->func->fixup_hs_desc(gsb, desc);
- }
-
- /* Generate HS BL descriptor */
- gm200_secboot_populate_hsf_bl_desc(acr_image, bl_desc);
-
- /* Create ACR blob and copy HS data to it */
- ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
- 0x1000, false, NULL, blob);
- if (ret)
- goto cleanup;
-
- nvkm_kmap(*blob);
- nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
- nvkm_done(*blob);
-
-cleanup:
- kfree(acr_image);
-
- return ret;
-}
-
-/*
- * High-secure bootloader blob creation
- */
-
-static int
-gm200_secboot_prepare_hsbl_blob(struct gm200_secboot *gsb)
-{
- struct nvkm_subdev *subdev = &gsb->base.subdev;
-
- gsb->hsbl_blob = gm200_secboot_load_firmware(subdev, "acr/bl", 0);
- if (IS_ERR(gsb->hsbl_blob)) {
- int ret = PTR_ERR(gsb->hsbl_blob);
-
- gsb->hsbl_blob = NULL;
- return ret;
- }
-
- return 0;
-}
+#include <engine/falcon.h>
+#include <subdev/mc.h>
/**
- * gm20x_secboot_prepare_blobs - load blobs common to all GM20X GPUs.
+ * gm200_secboot_run_blob() - run the given high-secure blob
*
- * This includes the LS blob, HS ucode loading blob, and HS bootloader.
- *
- * The HS ucode unload blob is only used on dGPU.
*/
int
-gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
-{
- int ret;
-
- /* Load and prepare the managed falcon's firmwares */
- if (!gsb->ls_blob) {
- ret = gm200_secboot_prepare_ls_blob(gsb);
- if (ret)
- return ret;
- }
-
- /* Load the HS firmware that will load the LS firmwares */
- if (!gsb->acr_load_blob) {
- ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
- &gsb->acr_load_blob,
- &gsb->acr_load_bl_desc, true);
- if (ret)
- return ret;
- }
-
- /* Load the HS firmware bootloader */
- if (!gsb->hsbl_blob) {
- ret = gm200_secboot_prepare_hsbl_blob(gsb);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
-{
- int ret;
-
- ret = gm20x_secboot_prepare_blobs(gsb);
- if (ret)
- return ret;
-
- /* dGPU only: load the HS firmware that unprotects the WPR region */
- if (!gsb->acr_unload_blob) {
- ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
- &gsb->acr_unload_blob,
- &gsb->acr_unload_bl_desc, false);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-gm200_secboot_blobs_ready(struct gm200_secboot *gsb)
+gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_subdev *subdev = &gsb->base.subdev;
+ struct nvkm_falcon *falcon = gsb->base.boot_falcon;
+ struct nvkm_vma vma;
int ret;
- /* firmware already loaded, nothing to do... */
- if (gsb->firmware_ok)
- return 0;
-
- ret = gsb->func->prepare_blobs(gsb);
- if (ret) {
- nvkm_error(subdev, "failed to load secure firmware\n");
- return ret;
- }
-
- gsb->firmware_ok = true;
-
- return 0;
-}
-
-
-/*
- * Secure Boot Execution
- */
-
-/**
- * gm200_secboot_load_hs_bl() - load HS bootloader into DMEM and IMEM
- */
-static void
-gm200_secboot_load_hs_bl(struct gm200_secboot *gsb, void *data, u32 data_size)
-{
- struct nvkm_device *device = gsb->base.subdev.device;
- struct fw_bin_header *hdr = gsb->hsbl_blob;
- struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset;
- void *blob_data = gsb->hsbl_blob + hdr->data_offset;
- void *hsbl_code = blob_data + hsbl_desc->code_off;
- void *hsbl_data = blob_data + hsbl_desc->data_off;
- u32 code_size = ALIGN(hsbl_desc->code_size, 256);
- const u32 base = gsb->base.base;
- u32 blk;
- u32 tag;
- int i;
-
- /*
- * Copy HS bootloader data
- */
- nvkm_wr32(device, base + 0x1c0, (0x00000000 | (0x1 << 24)));
- for (i = 0; i < hsbl_desc->data_size / 4; i++)
- nvkm_wr32(device, base + 0x1c4, ((u32 *)hsbl_data)[i]);
-
- /*
- * Copy HS bootloader interface structure where the HS descriptor
- * expects it to be
- */
- nvkm_wr32(device, base + 0x1c0,
- (hsbl_desc->dmem_load_off | (0x1 << 24)));
- for (i = 0; i < data_size / 4; i++)
- nvkm_wr32(device, base + 0x1c4, ((u32 *)data)[i]);
-
- /* Copy HS bootloader code to end of IMEM */
- blk = (nvkm_rd32(device, base + 0x108) & 0x1ff) - (code_size >> 8);
- tag = hsbl_desc->start_tag;
- nvkm_wr32(device, base + 0x180, ((blk & 0xff) << 8) | (0x1 << 24));
- for (i = 0; i < code_size / 4; i++) {
- /* write new tag every 256B */
- if ((i & 0x3f) == 0) {
- nvkm_wr32(device, base + 0x188, tag & 0xffff);
- tag++;
- }
- nvkm_wr32(device, base + 0x184, ((u32 *)hsbl_code)[i]);
- }
- nvkm_wr32(device, base + 0x188, 0);
-}
-
-/**
- * gm200_secboot_setup_falcon() - set up the secure falcon for secure boot
- */
-static int
-gm200_secboot_setup_falcon(struct gm200_secboot *gsb)
-{
- struct nvkm_device *device = gsb->base.subdev.device;
- struct fw_bin_header *hdr = gsb->hsbl_blob;
- struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset;
- /* virtual start address for boot vector */
- u32 virt_addr = hsbl_desc->start_tag << 8;
- const u32 base = gsb->base.base;
- const u32 reg_base = base + 0xe00;
- u32 inst_loc;
- int ret;
-
- ret = nvkm_secboot_falcon_reset(&gsb->base);
+ ret = nvkm_falcon_get(falcon, subdev);
if (ret)
return ret;
- /* setup apertures - virtual */
- nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_UCODE), 0x4);
- nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_VIRT), 0x0);
- /* setup apertures - physical */
- nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_VID), 0x4);
- nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_COH),
- 0x4 | 0x1);
- nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_NCOH),
- 0x4 | 0x2);
-
- /* Set context */
- if (nvkm_memory_target(gsb->inst->memory) == NVKM_MEM_TARGET_VRAM)
- inst_loc = 0x0; /* FB */
- else
- inst_loc = 0x3; /* Non-coherent sysmem */
-
- nvkm_mask(device, base + 0x048, 0x1, 0x1);
- nvkm_wr32(device, base + 0x480,
- ((gsb->inst->addr >> 12) & 0xfffffff) |
- (inst_loc << 28) | (1 << 30));
-
- /* Set boot vector to code's starting virtual address */
- nvkm_wr32(device, base + 0x104, virt_addr);
-
- return 0;
-}
-
-/**
- * gm200_secboot_run_hs_blob() - run the given high-secure blob
- */
-static int
-gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob,
- struct gm200_flcn_bl_desc *desc)
-{
- struct nvkm_vma vma;
- u64 vma_addr;
- const u32 bl_desc_size = gsb->func->bl_desc_size;
- u8 bl_desc[bl_desc_size];
- int ret;
-
/* Map the HS firmware so the HS bootloader can see it */
ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
- if (ret)
+ if (ret) {
+ nvkm_falcon_put(falcon, subdev);
return ret;
+ }
- /* Add the mapping address to the DMA bases */
- vma_addr = flcn64_to_u64(desc->code_dma_base) + vma.offset;
- desc->code_dma_base.lo = lower_32_bits(vma_addr);
- desc->code_dma_base.hi = upper_32_bits(vma_addr);
- vma_addr = flcn64_to_u64(desc->data_dma_base) + vma.offset;
- desc->data_dma_base.lo = lower_32_bits(vma_addr);
- desc->data_dma_base.hi = upper_32_bits(vma_addr);
-
- /* Fixup the BL header */
- gsb->func->fixup_bl_desc(desc, &bl_desc);
-
- /* Reset the falcon and make it ready to run the HS bootloader */
- ret = gm200_secboot_setup_falcon(gsb);
+ /* Reset and set the falcon up */
+ ret = nvkm_falcon_reset(falcon);
if (ret)
- goto done;
+ goto end;
+ nvkm_falcon_bind_context(falcon, gsb->inst);
/* Load the HS bootloader into the falcon's IMEM/DMEM */
- gm200_secboot_load_hs_bl(gsb, &bl_desc, bl_desc_size);
-
- /* Start the HS bootloader */
- ret = nvkm_secboot_falcon_run(&gsb->base);
+ ret = sb->acr->func->load(sb->acr, &gsb->base, blob, vma.offset);
if (ret)
- goto done;
-
-done:
- /* Restore the original DMA addresses */
- vma_addr = flcn64_to_u64(desc->code_dma_base) - vma.offset;
- desc->code_dma_base.lo = lower_32_bits(vma_addr);
- desc->code_dma_base.hi = upper_32_bits(vma_addr);
- vma_addr = flcn64_to_u64(desc->data_dma_base) - vma.offset;
- desc->data_dma_base.lo = lower_32_bits(vma_addr);
- desc->data_dma_base.hi = upper_32_bits(vma_addr);
-
- /* We don't need the ACR firmware anymore */
- nvkm_gpuobj_unmap(&vma);
+ goto end;
- return ret;
-}
+ /* Disable interrupts as we will poll for the HALT bit */
+ nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
-/*
- * gm200_secboot_reset() - execute secure boot from the prepared state
- *
- * Load the HS bootloader and ask the falcon to run it. This will in turn
- * load the HS firmware and run it, so once the falcon stops all the managed
- * falcons should have their LS firmware loaded and be ready to run.
- */
-int
-gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int ret;
+ /* Set default error value in mailbox register */
+ nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
- /* Make sure all blobs are ready */
- ret = gm200_secboot_blobs_ready(gsb);
+ /* Start the HS bootloader */
+ nvkm_falcon_set_start_addr(falcon, sb->acr->start_address);
+ nvkm_falcon_start(falcon);
+ ret = nvkm_falcon_wait_for_halt(falcon, 100);
if (ret)
- return ret;
-
- /*
- * Dummy GM200 implementation: perform secure boot each time we are
- * called on FECS. Since only FECS and GPCCS are managed and started
- * together, this ought to be safe.
- *
- * Once we have proper PMU firmware and support, this will be changed
- * to a proper call to the PMU method.
- */
- if (falcon != NVKM_SECBOOT_FALCON_FECS)
goto end;
- /* If WPR is set and we have an unload blob, run it to unlock WPR */
- if (gsb->acr_unload_blob &&
- gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) {
- ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob,
- &gsb->acr_unload_bl_desc);
- if (ret)
- return ret;
+ /* If mailbox register contains an error code, then ACR has failed */
+ ret = nvkm_falcon_rd32(falcon, 0x040);
+ if (ret) {
+ nvkm_error(subdev, "ACR boot failed, ret 0x%08x\n", ret);
+ ret = -EINVAL;
+ goto end;
}
- /* Reload all managed falcons */
- ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_load_blob,
- &gsb->acr_load_bl_desc);
- if (ret)
- return ret;
-
end:
- gsb->falcon_state[falcon] = RESET;
- return 0;
-}
+ /* Reenable interrupts */
+ nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
-int
-gm200_secboot_start(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int base;
-
- switch (falcon) {
- case NVKM_SECBOOT_FALCON_FECS:
- base = 0x409000;
- break;
- case NVKM_SECBOOT_FALCON_GPCCS:
- base = 0x41a000;
- break;
- default:
- nvkm_error(&sb->subdev, "cannot start unhandled falcon!\n");
- return -EINVAL;
- }
-
- nvkm_wr32(sb->subdev.device, base + 0x130, 0x00000002);
- gsb->falcon_state[falcon] = RUNNING;
+ /* We don't need the ACR firmware anymore */
+ nvkm_gpuobj_unmap(&vma);
+ nvkm_falcon_put(falcon, subdev);
- return 0;
+ return ret;
}
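
For reference, the mailbox handshake implemented above, restated as a comment (this is a summary of the code, not part of the patch):

/*
 * gm200_secboot_run_blob() handshake, as coded above:
 *
 *   host:  falcon mailbox 0x040 <- 0xdeada5a5 (error sentinel)
 *   host:  set start address to sb->acr->start_address, start falcon
 *   host:  poll for the HALT bit (100ms timeout)
 *   HS FW: expected to clear mailbox 0x040 on success
 *   host:  non-zero mailbox => report "ACR boot failed", return -EINVAL
 */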
-
-
int
-gm200_secboot_init(struct nvkm_secboot *sb)
+gm200_secboot_oneinit(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_device *device = sb->subdev.device;
@@ -1361,24 +132,22 @@ gm200_secboot_init(struct nvkm_secboot *sb)
nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
nvkm_done(gsb->inst);
+ if (sb->acr->func->oneinit) {
+ ret = sb->acr->func->oneinit(sb->acr, sb);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
-static int
+int
gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
{
- struct gm200_secboot *gsb = gm200_secboot(sb);
int ret = 0;
- int i;
- /* Run the unload blob to unprotect the WPR region */
- if (gsb->acr_unload_blob &&
- gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE)
- ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob,
- &gsb->acr_unload_bl_desc);
-
- for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
- gsb->falcon_state[i] = NON_SECURE;
+ if (sb->acr->func->fini)
+ ret = sb->acr->func->fini(sb->acr, sb, suspend);
return ret;
}
@@ -1388,11 +157,7 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
- nvkm_gpuobj_del(&gsb->acr_unload_blob);
-
- kfree(gsb->hsbl_blob);
- nvkm_gpuobj_del(&gsb->acr_load_blob);
- nvkm_gpuobj_del(&gsb->ls_blob);
+ sb->acr->func->dtor(sb->acr);
nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
nvkm_gpuobj_del(&gsb->pgd);
@@ -1405,50 +170,9 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
static const struct nvkm_secboot_func
gm200_secboot = {
.dtor = gm200_secboot_dtor,
- .init = gm200_secboot_init,
+ .oneinit = gm200_secboot_oneinit,
.fini = gm200_secboot_fini,
- .reset = gm200_secboot_reset,
- .start = gm200_secboot_start,
- .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
- BIT(NVKM_SECBOOT_FALCON_GPCCS),
- .boot_falcon = NVKM_SECBOOT_FALCON_PMU,
-};
-
-/**
- * gm200_fixup_bl_desc - just copy the BL descriptor
- *
- * Use the GM200 descriptor format by default.
- */
-static void
-gm200_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
-{
- memcpy(ret, desc, sizeof(*desc));
-}
-
-static void
-gm200_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
- struct hsflcn_acr_desc *desc)
-{
- desc->ucode_blob_base = gsb->ls_blob->addr;
- desc->ucode_blob_size = gsb->ls_blob->size;
-
- desc->wpr_offset = 0;
-
- /* WPR region information for the HS binary to set up */
- desc->wpr_region_id = 1;
- desc->regions.no_regions = 1;
- desc->regions.region_props[0].region_id = 1;
- desc->regions.region_props[0].start_addr = gsb->wpr_addr >> 8;
- desc->regions.region_props[0].end_addr =
- (gsb->wpr_addr + gsb->wpr_size) >> 8;
-}
-
-static const struct gm200_secboot_func
-gm200_secboot_func = {
- .bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
- .fixup_bl_desc = gm200_secboot_fixup_bl_desc,
- .fixup_hs_desc = gm200_secboot_fixup_hs_desc,
- .prepare_blobs = gm200_secboot_prepare_blobs,
+ .run_blob = gm200_secboot_run_blob,
};
int
@@ -1457,6 +181,12 @@ gm200_secboot_new(struct nvkm_device *device, int index,
{
int ret;
struct gm200_secboot *gsb;
+ struct nvkm_acr *acr;
+
+ acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
+ BIT(NVKM_SECBOOT_FALCON_GPCCS));
+ if (IS_ERR(acr))
+ return PTR_ERR(acr);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
@@ -1465,15 +195,14 @@ gm200_secboot_new(struct nvkm_device *device, int index,
}
*psb = &gsb->base;
- ret = nvkm_secboot_ctor(&gm200_secboot, device, index, &gsb->base);
+ ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base);
if (ret)
return ret;
- gsb->func = &gm200_secboot_func;
-
return 0;
}
+
MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
new file mode 100644
index 000000000000..45adf1a3bc20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_GM200_H__
+#define __NVKM_SECBOOT_GM200_H__
+
+#include "priv.h"
+
+struct gm200_secboot {
+ struct nvkm_secboot base;
+
+ /* Instance block & address space used for HS FW execution */
+ struct nvkm_gpuobj *inst;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
+};
+#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
+
+int gm200_secboot_oneinit(struct nvkm_secboot *);
+int gm200_secboot_fini(struct nvkm_secboot *, bool);
+void *gm200_secboot_dtor(struct nvkm_secboot *);
+int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *);
+
+#endif
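
A minimal sketch of how the gm200_secboot() downcast above is meant to be used by the shared helpers the header declares (illustrative only; the function name is hypothetical):

/* Illustrative only: recovering the chip state from the generic
 * secboot pointer via the container_of() macro defined above. */
static int example_use_gsb(struct nvkm_secboot *sb)
{
	struct gm200_secboot *gsb = gm200_secboot(sb);

	/* gsb->inst, gsb->pgd and gsb->vm back the address space the
	 * HS falcon executes under while running a blob */
	return gsb->vm ? 0 : -EINVAL;
}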
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index d5395ebfe8d3..6707b8edc086 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -20,103 +20,8 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-
-#include <core/gpuobj.h>
-
-/*
- * The BL header format used by GM20B's firmware is slightly different
- * from the one of GM200. Fix the differences here.
- */
-struct gm20b_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- u32 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- u32 data_dma_base;
- u32 data_size;
-};
-
-static int
-gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
-{
- struct nvkm_subdev *subdev = &gsb->base.subdev;
- int acr_size;
- int ret;
-
- ret = gm20x_secboot_prepare_blobs(gsb);
- if (ret)
- return ret;
-
- acr_size = gsb->acr_load_blob->size;
- /*
- * On Tegra the WPR region is set by the bootloader. It is illegal for
- * the HS blob to be larger than this region.
- */
- if (acr_size > gsb->wpr_size) {
- nvkm_error(subdev, "WPR region too small for FW blob!\n");
- nvkm_error(subdev, "required: %dB\n", acr_size);
- nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
- return -ENOSPC;
- }
-
- return 0;
-}
-
-/**
- * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
- *
- * There is only a slight format difference (DMA addresses being 32-bits and
- * 256B-aligned) to address.
- */
-static void
-gm20b_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
-{
- struct gm20b_flcn_bl_desc *gdesc = ret;
- u64 addr;
-
- memcpy(gdesc->reserved, desc->reserved, sizeof(gdesc->reserved));
- memcpy(gdesc->signature, desc->signature, sizeof(gdesc->signature));
- gdesc->ctx_dma = desc->ctx_dma;
- addr = desc->code_dma_base.hi;
- addr <<= 32;
- addr |= desc->code_dma_base.lo;
- gdesc->code_dma_base = lower_32_bits(addr >> 8);
- gdesc->non_sec_code_off = desc->non_sec_code_off;
- gdesc->non_sec_code_size = desc->non_sec_code_size;
- gdesc->sec_code_off = desc->sec_code_off;
- gdesc->sec_code_size = desc->sec_code_size;
- gdesc->code_entry_point = desc->code_entry_point;
- addr = desc->data_dma_base.hi;
- addr <<= 32;
- addr |= desc->data_dma_base.lo;
- gdesc->data_dma_base = lower_32_bits(addr >> 8);
- gdesc->data_size = desc->data_size;
-}
-
-static void
-gm20b_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
- struct hsflcn_acr_desc *desc)
-{
- desc->ucode_blob_base = gsb->ls_blob->addr;
- desc->ucode_blob_size = gsb->ls_blob->size;
-
- desc->wpr_offset = 0;
-}
-
-static const struct gm200_secboot_func
-gm20b_secboot_func = {
- .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
- .fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
- .fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
- .prepare_blobs = gm20b_secboot_prepare_blobs,
-};
-
+#include "acr.h"
+#include "gm200.h"
#ifdef CONFIG_ARCH_TEGRA
#define TEGRA_MC_BASE 0x70019000
@@ -144,15 +49,15 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
return PTR_ERR(mc);
}
- gsb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
+ sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
- gsb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
+ sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
<< 17;
cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
iounmap(mc);
/* Check that WPR settings are valid */
- if (gsb->wpr_size == 0) {
+ if (sb->wpr_size == 0) {
nvkm_error(&sb->subdev, "WPR region is empty\n");
return -EINVAL;
}
@@ -174,7 +79,7 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
#endif
static int
-gm20b_secboot_init(struct nvkm_secboot *sb)
+gm20b_secboot_oneinit(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
@@ -183,17 +88,15 @@ gm20b_secboot_init(struct nvkm_secboot *sb)
if (ret)
return ret;
- return gm200_secboot_init(sb);
+ return gm200_secboot_oneinit(sb);
}
static const struct nvkm_secboot_func
gm20b_secboot = {
.dtor = gm200_secboot_dtor,
- .init = gm20b_secboot_init,
- .reset = gm200_secboot_reset,
- .start = gm200_secboot_start,
- .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
- .boot_falcon = NVKM_SECBOOT_FALCON_PMU,
+ .oneinit = gm20b_secboot_oneinit,
+ .fini = gm200_secboot_fini,
+ .run_blob = gm200_secboot_run_blob,
};
int
@@ -202,6 +105,11 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
{
int ret;
struct gm200_secboot *gsb;
+ struct nvkm_acr *acr;
+
+ acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS));
+ if (IS_ERR(acr))
+ return PTR_ERR(acr);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
@@ -210,12 +118,10 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
}
*psb = &gsb->base;
- ret = nvkm_secboot_ctor(&gm20b_secboot, device, index, &gsb->base);
+ ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);
if (ret)
return ret;
- gsb->func = &gm20b_secboot_func;
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
new file mode 100644
index 000000000000..00886cee57eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_LS_UCODE_H__
+#define __NVKM_SECBOOT_LS_UCODE_H__
+
+#include <core/os.h>
+#include <core/subdev.h>
+#include <subdev/secboot.h>
+
+
+/**
+ * struct ls_ucode_img_desc - descriptor of firmware image
+ * @descriptor_size: size of this descriptor
+ * @image_size: size of the whole image
+ * @bootloader_start_offset: start offset of the bootloader in ucode image
+ * @bootloader_size: size of the bootloader
+ * @bootloader_imem_offset: start offset of the bootloader in IMEM
+ * @bootloader_entry_point: entry point of the bootloader in IMEM
+ * @app_start_offset: start offset of the LS firmware
+ * @app_size: size of the LS firmware's code and data
+ * @app_imem_offset: offset of the app in IMEM
+ * @app_imem_entry: entry point of the app in IMEM
+ * @app_dmem_offset: offset of the data in DMEM
+ * @app_resident_code_offset: offset of app code from app_start_offset
+ * @app_resident_code_size: size of the code
+ * @app_resident_data_offset: offset of data from app_start_offset
+ * @app_resident_data_size: size of data
+ *
+ * A firmware image contains the code, data, and bootloader of a given LS
+ * falcon in a single blob. This structure describes where everything is.
+ *
+ * This can be generated from a (bootloader, code, data) set if they have
+ * been loaded separately, or come directly from a file.
+ */
+struct ls_ucode_img_desc {
+ u32 descriptor_size;
+ u32 image_size;
+ u32 tools_version;
+ u32 app_version;
+ char date[64];
+ u32 bootloader_start_offset;
+ u32 bootloader_size;
+ u32 bootloader_imem_offset;
+ u32 bootloader_entry_point;
+ u32 app_start_offset;
+ u32 app_size;
+ u32 app_imem_offset;
+ u32 app_imem_entry;
+ u32 app_dmem_offset;
+ u32 app_resident_code_offset;
+ u32 app_resident_code_size;
+ u32 app_resident_data_offset;
+ u32 app_resident_data_size;
+ u32 nb_overlays;
+ struct {u32 start; u32 size; } load_ovl[64];
+ u32 compressed;
+};
+
+/**
+ * struct ls_ucode_img - temporary storage for loaded LS firmwares
+ * @node: to link within lsf_ucode_mgr
+ * @falcon_id: ID of the falcon this LS firmware is for
+ * @ucode_desc: loaded or generated map of ucode_data
+ * @ucode_data: firmware payload (code and data)
+ * @ucode_size: size in bytes of data in ucode_data
+ * @sig: signature for this firmware
+ * @sig_size: size of the signature in bytes
+ *
+ * Preparing the WPR LS blob requires information about all the LS firmwares
+ * (size, etc.) to be known. This structure contains all the data of one LS
+ * firmware.
+ */
+struct ls_ucode_img {
+ struct list_head node;
+ enum nvkm_secboot_falcon falcon_id;
+
+ struct ls_ucode_img_desc ucode_desc;
+ u8 *ucode_data;
+ u32 ucode_size;
+
+ u8 *sig;
+ u32 sig_size;
+};
+
+/**
+ * struct fw_bin_header - header of firmware files
+ * @bin_magic: always 0x3b1d14f0
+ * @bin_ver: version of the bin format
+ * @bin_size: entire image size including this header
+ * @header_offset: offset of the firmware/bootloader header in the file
+ * @data_offset: offset of the firmware/bootloader payload in the file
+ * @data_size: size of the payload
+ *
+ * This header is located at the beginning of the HS firmware and HS bootloader
+ * files, to describe where the headers and data can be found.
+ */
+struct fw_bin_header {
+ u32 bin_magic;
+ u32 bin_ver;
+ u32 bin_size;
+ u32 header_offset;
+ u32 data_offset;
+ u32 data_size;
+};
+
+/**
+ * struct fw_bl_desc - firmware bootloader descriptor
+ * @start_tag: starting tag of bootloader
+ * @dmem_load_off: DMEM offset of flcn_bl_dmem_desc
+ * @code_off: offset of code section
+ * @code_size: size of code section
+ * @data_off: offset of data section
+ * @data_size: size of data section
+ *
+ * This structure is embedded in bootloader firmware files to describe the
+ * IMEM and DMEM layout expected by the bootloader.
+ */
+struct fw_bl_desc {
+ u32 start_tag;
+ u32 dmem_load_off;
+ u32 code_off;
+ u32 code_size;
+ u32 data_off;
+ u32 data_size;
+};
+
+int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
+int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
+
+
+#endif
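
A hedged sketch of how the file-level headers above are walked: fw_bin_header sits at offset 0 of the firmware file and its fields locate the fw_bl_desc and payload (the helper name is hypothetical):

/* Illustrative only: locating the bootloader descriptor inside a
 * firmware file. All offsets are relative to the start of the file. */
static const struct fw_bl_desc *
example_find_bl_desc(const void *fw_data)
{
	const struct fw_bin_header *hdr = fw_data;

	/* hdr->data_offset locates the code/data payload the same way */
	return fw_data + hdr->header_offset;
}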
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
new file mode 100644
index 000000000000..40a6df77bb8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#include "ls_ucode.h"
+#include "acr.h"
+
+#include <core/firmware.h>
+
+#define BL_DESC_BLK_SIZE 256
+/**
+ * ls_ucode_img_build() - build a ucode image and descriptor
+ *
+ * @bl: bootloader image, including 16-byte descriptor
+ * @code: LS firmware code segment
+ * @data: LS firmware data segment
+ * @desc: ucode descriptor to be written
+ *
+ * Return: the allocated ucode image with corresponding descriptor information.
+ * @desc is also updated to contain the right offsets within the returned image.
+ */
+static void *
+ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
+ const struct firmware *data, struct ls_ucode_img_desc *desc)
+{
+ struct fw_bin_header *bin_hdr = (void *)bl->data;
+ struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
+ void *bl_data = (void *)bl->data + bin_hdr->data_offset;
+ u32 pos = 0;
+ void *image;
+
+ desc->bootloader_start_offset = pos;
+ desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
+ desc->bootloader_imem_offset = bl_desc->start_tag * 256;
+ desc->bootloader_entry_point = bl_desc->start_tag * 256;
+
+ pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
+ desc->app_start_offset = pos;
+ desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
+ ALIGN(data->size, BL_DESC_BLK_SIZE);
+ desc->app_imem_offset = 0;
+ desc->app_imem_entry = 0;
+ desc->app_dmem_offset = 0;
+ desc->app_resident_code_offset = 0;
+ desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
+
+ pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
+ desc->app_resident_data_offset = pos - desc->app_start_offset;
+ desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
+
+ desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
+ desc->app_size;
+
+ image = kzalloc(desc->image_size, GFP_KERNEL);
+ if (!image)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(image + desc->bootloader_start_offset, bl_data,
+ bl_desc->code_size);
+ memcpy(image + desc->app_start_offset, code->data, code->size);
+ memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
+ data->data, data->size);
+
+ return image;
+}
+
+/**
+ * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image
+ *
+ * Load the LS microcode, bootloader and signature and pack them into a single
+ * blob. Also generate the corresponding ucode descriptor.
+ */
+static int
+ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
+ const char *falcon_name)
+{
+ const struct firmware *bl, *code, *data, *sig;
+ char f[64];
+ int ret;
+
+ snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
+ ret = nvkm_firmware_get(subdev->device, f, &bl);
+ if (ret)
+ goto error;
+
+ snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
+ ret = nvkm_firmware_get(subdev->device, f, &code);
+ if (ret)
+ goto free_bl;
+
+ snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
+ ret = nvkm_firmware_get(subdev->device, f, &data);
+ if (ret)
+ goto free_inst;
+
+ snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
+ ret = nvkm_firmware_get(subdev->device, f, &sig);
+ if (ret)
+ goto free_data;
+ img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
+ if (!img->sig) {
+ ret = -ENOMEM;
+ goto free_sig;
+ }
+ img->sig_size = sig->size;
+
+ img->ucode_data = ls_ucode_img_build(bl, code, data,
+ &img->ucode_desc);
+ if (IS_ERR(img->ucode_data)) {
+ ret = PTR_ERR(img->ucode_data);
+ goto free_sig;
+ }
+ img->ucode_size = img->ucode_desc.image_size;
+
+free_sig:
+ nvkm_firmware_put(sig);
+free_data:
+ nvkm_firmware_put(data);
+free_inst:
+ nvkm_firmware_put(code);
+free_bl:
+ nvkm_firmware_put(bl);
+error:
+ return ret;
+}
+
+int
+acr_ls_ucode_load_fecs(const struct nvkm_subdev *subdev,
+ struct ls_ucode_img *img)
+{
+ return ls_ucode_img_load_gr(subdev, img, "fecs");
+}
+
+int
+acr_ls_ucode_load_gpccs(const struct nvkm_subdev *subdev,
+ struct ls_ucode_img *img)
+{
+ return ls_ucode_img_load_gr(subdev, img, "gpccs");
+}
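
To make the ALIGN arithmetic in ls_ucode_img_build() concrete, a worked example with hypothetical input sizes (derived from the code above, not part of the patch):

/*
 * Worked example for ls_ucode_img_build(), assuming
 * bl_desc->code_size = 0x180, code->size = 0x2345,
 * data->size = 0x120 and BL_DESC_BLK_SIZE = 0x100:
 *
 *   bootloader_start_offset  = 0x0000 (size 0x0180, u32-aligned)
 *   app_start_offset         = 0x0200 (0x0180 rounded up to 0x100)
 *   app_resident_code_size   = 0x2400 (0x2345 rounded up)
 *   app_resident_data_offset = 0x2400 (relative to app_start_offset)
 *   app_resident_data_size   = 0x0200 (0x0120 rounded up)
 *   image_size               = 0x2800 (0x0200 + 0x2400 + 0x0200)
 */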
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index a9a8a0e1017e..936a65f5658c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -27,20 +27,16 @@
#include <subdev/mmu.h>
struct nvkm_secboot_func {
- int (*init)(struct nvkm_secboot *);
+ int (*oneinit)(struct nvkm_secboot *);
int (*fini)(struct nvkm_secboot *, bool suspend);
void *(*dtor)(struct nvkm_secboot *);
- int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
- int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
-
- /* ID of the falcon that will perform secure boot */
- enum nvkm_secboot_falcon boot_falcon;
- /* Bit-mask of IDs of managed falcons */
- unsigned long managed_falcons;
+ int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *);
};
-int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_device *,
- int index, struct nvkm_secboot *);
+extern const char *nvkm_secboot_falcon_name[];
+
+int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
+ struct nvkm_device *, int, struct nvkm_secboot *);
int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
int nvkm_secboot_falcon_run(struct nvkm_secboot *);
@@ -48,187 +44,20 @@ struct flcn_u64 {
u32 lo;
u32 hi;
};
+
static inline u64 flcn64_to_u64(const struct flcn_u64 f)
{
return ((u64)f.hi) << 32 | f.lo;
}
-/**
- * struct gm200_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off: offset from code_dma_base where the non-secure code is
- * located. The offset must be multiple of 256 to help perf
- * @non_sec_code_size: the size of the nonSecure code part.
- * @sec_code_off: offset from code_dma_base where the secure code is
- * located. The offset must be multiple of 256 to help perf
- * @sec_code_size: offset from code_dma_base where the secure code is
- * located. The offset must be multiple of 256 to help perf
- * @code_entry_point: code entry point which will be invoked by BL after
- * code is loaded.
- * @data_dma_base: 256B aligned Physical FB Address where data is located.
- * (falcon's $xdbase register)
- * @data_size: size of data block. Should be multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct gm200_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
-};
-
-/**
- * struct hsflcn_acr_desc - data section of the HS firmware
- *
- * This header is to be copied at the beginning of DMEM by the HS bootloader.
- *
- * @signature: signature of ACR ucode
- * @wpr_region_id: region ID holding the WPR header and its details
- * @wpr_offset: offset from the WPR region holding the wpr header
- * @regions: region descriptors
- * @nonwpr_ucode_blob_size: size of LS blob
- * @nonwpr_ucode_blob_start: FB location of LS blob is
- */
-struct hsflcn_acr_desc {
- union {
- u8 reserved_dmem[0x200];
- u32 signatures[4];
- } ucode_reserved_space;
- u32 wpr_region_id;
- u32 wpr_offset;
- u32 mmu_mem_range;
-#define FLCN_ACR_MAX_REGIONS 2
- struct {
- u32 no_regions;
- struct {
- u32 start_addr;
- u32 end_addr;
- u32 region_id;
- u32 read_mask;
- u32 write_mask;
- u32 client_mask;
- } region_props[FLCN_ACR_MAX_REGIONS];
- } regions;
- u32 ucode_blob_size;
- u64 ucode_blob_base __aligned(8);
- struct {
- u32 vpr_enabled;
- u32 vpr_start;
- u32 vpr_end;
- u32 hdcp_policies;
- } vpr_desc;
-};
-
-/**
- * Contains the whole secure boot state, allowing it to be performed as needed
- * @wpr_addr: physical address of the WPR region
- * @wpr_size: size in bytes of the WPR region
- * @ls_blob: LS blob of all the LS firmwares, signatures, bootloaders
- * @ls_blob_size: size of the LS blob
- * @ls_blob_nb_regions: number of LS firmwares that will be loaded
- * @acr_blob: HS blob
- * @acr_blob_vma: mapping of the HS blob into the secure falcon's VM
- * @acr_bl_desc: bootloader descriptor of the HS blob
- * @hsbl_blob: HS blob bootloader
- * @inst: instance block for HS falcon
- * @pgd: page directory for the HS falcon
- * @vm: address space used by the HS falcon
- * @falcon_state: current state of the managed falcons
- * @firmware_ok: whether the firmware blobs have been created
- */
-struct gm200_secboot {
- struct nvkm_secboot base;
- const struct gm200_secboot_func *func;
-
- /*
- * Address and size of the WPR region. On dGPU this will be the
- * address of the LS blob. On Tegra this is a fixed region set by the
- * bootloader
- */
- u64 wpr_addr;
- u32 wpr_size;
-
- /*
- * HS FW - lock WPR region (dGPU only) and load LS FWs
- * on Tegra the HS FW copies the LS blob into the fixed WPR instead
- */
- struct nvkm_gpuobj *acr_load_blob;
- struct gm200_flcn_bl_desc acr_load_bl_desc;
-
- /* HS FW - unlock WPR region (dGPU only) */
- struct nvkm_gpuobj *acr_unload_blob;
- struct gm200_flcn_bl_desc acr_unload_bl_desc;
-
- /* HS bootloader */
- void *hsbl_blob;
-
- /* LS FWs, to be loaded by the HS ACR */
- struct nvkm_gpuobj *ls_blob;
-
- /* Instance block & address space used for HS FW execution */
- struct nvkm_gpuobj *inst;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-
- /* To keep track of the state of all managed falcons */
- enum {
- /* In non-secure state, no firmware loaded, no privileges*/
- NON_SECURE = 0,
- /* In low-secure mode and ready to be started */
- RESET,
- /* In low-secure mode and running */
- RUNNING,
- } falcon_state[NVKM_SECBOOT_FALCON_END];
-
- bool firmware_ok;
-};
-#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
-
-/**
- * Contains functions we wish to abstract between GM200-like implementations
- * @bl_desc_size: size of the BL descriptor used by this chip.
- * @fixup_bl_desc: hook that generates the proper BL descriptor format from
- * the generic GM200 format into a data array of size
- * bl_desc_size
- * @fixup_hs_desc: hook that twiddles the HS descriptor before it is used
- * @prepare_blobs: prepares the various blobs needed for secure booting
- */
-struct gm200_secboot_func {
- /*
- * Size of the bootloader descriptor for this chip. A block of this
- * size is allocated before booting a falcon and the fixup_bl_desc
- * callback is called on it
- */
- u32 bl_desc_size;
- void (*fixup_bl_desc)(const struct gm200_flcn_bl_desc *, void *);
-
- /*
- * Chip-specific modifications of the HS descriptor can be done here.
- * On dGPU this is used to fill the information about the WPR region
- * we want the HS FW to set up.
- */
- void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
- int (*prepare_blobs)(struct gm200_secboot *);
-};
+static inline struct flcn_u64 u64_to_flcn64(u64 u)
+{
+ struct flcn_u64 ret;
-int gm200_secboot_init(struct nvkm_secboot *);
-void *gm200_secboot_dtor(struct nvkm_secboot *);
-int gm200_secboot_reset(struct nvkm_secboot *, u32);
-int gm200_secboot_start(struct nvkm_secboot *, u32);
+ ret.hi = upper_32_bits(u);
+ ret.lo = lower_32_bits(u);
-int gm20x_secboot_prepare_blobs(struct gm200_secboot *);
+ return ret;
+}
#endif
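
A quick sketch of the new u64_to_flcn64() paired with the existing flcn64_to_u64(); the two split and reassemble 64-bit DMA addresses around the falcon's pair of 32-bit descriptor fields (the function name is hypothetical):

/* Illustrative only: flcn_u64 round-trips a 64-bit address losslessly */
static void example_flcn64_roundtrip(void)
{
	u64 addr = 0x000012345678abcdULL;
	struct flcn_u64 f = u64_to_flcn64(addr);

	/* f.hi == 0x00001234, f.lo == 0x5678abcd */
	WARN_ON(flcn64_to_u64(f) != addr);
}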
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 8894fee30cbc..df949fa7d05d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -64,10 +64,9 @@ nvkm_therm_update_trip(struct nvkm_therm *therm)
}
static int
-nvkm_therm_update_linear(struct nvkm_therm *therm)
+nvkm_therm_compute_linear_duty(struct nvkm_therm *therm, u8 linear_min_temp,
+ u8 linear_max_temp)
{
- u8 linear_min_temp = therm->fan->bios.linear_min_temp;
- u8 linear_max_temp = therm->fan->bios.linear_max_temp;
u8 temp = therm->func->temp_get(therm);
u16 duty;
@@ -85,6 +84,21 @@ nvkm_therm_update_linear(struct nvkm_therm *therm)
return duty;
}
+static int
+nvkm_therm_update_linear(struct nvkm_therm *therm)
+{
+ u8 min = therm->fan->bios.linear_min_temp;
+ u8 max = therm->fan->bios.linear_max_temp;
+ return nvkm_therm_compute_linear_duty(therm, min, max);
+}
+
+static int
+nvkm_therm_update_linear_fallback(struct nvkm_therm *therm)
+{
+ u8 max = therm->bios_sensor.thrs_fan_boost.temp;
+ return nvkm_therm_compute_linear_duty(therm, 30, max);
+}
+
static void
nvkm_therm_update(struct nvkm_therm *therm, int mode)
{
@@ -119,6 +133,8 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
case NVBIOS_THERM_FAN_OTHER:
if (therm->cstate)
duty = therm->cstate;
+ else
+ duty = nvkm_therm_update_linear_fallback(therm);
poll = false;
break;
}
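
The body of the duty computation lives outside this hunk; assuming nvkm_therm_compute_linear_duty() keeps the pre-refactor linear mapping from temperature to fan duty, the fallback path added here behaves roughly as sketched below (names and bounds are assumptions):

/* Hedged sketch of the assumed interpolation: maps temp within
 * [min, max] onto [min_duty, max_duty]. The new fallback path calls
 * it with min = 30 and max = the fan-boost threshold temperature. */
static int example_linear_duty(u8 temp, u8 min, u8 max,
			       u8 min_duty, u8 max_duty)
{
	if (temp <= min)
		return min_duty;
	if (temp >= max)
		return max_duty;
	return min_duty +
	       (temp - min) * (max_duty - min_duty) / (max - min);
}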
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index fe063d5728e2..67ada1d9a28c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -95,6 +95,20 @@ nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
return intr & ~handled;
}
+int
+nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+ struct nvkm_top *top = device->top;
+ struct nvkm_top_device *info;
+
+ list_for_each_entry(info, &top->device, head) {
+ if (info->index == devidx && info->fault >= 0)
+ return info->fault;
+ }
+
+ return -ENOENT;
+}
+
enum nvkm_devidx
nvkm_top_fault(struct nvkm_device *device, int fault)
{
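
nvkm_top_fault_id() added above is the inverse of the nvkm_top_fault() lookup whose start is visible here; a hedged usage sketch (the caller and the register it would program are hypothetical):

/* Illustrative only: mapping a subdevice index to its MMU fault unit
 * ID; -ENOENT means the device table carries no entry for it. */
static void example_fault_id(struct nvkm_device *device)
{
	int fault = nvkm_top_fault_id(device, NVKM_ENGINE_GR);

	if (fault >= 0)
		; /* e.g. program the engine's fault ID register */
}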
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index dc026a843712..a2bb855a2851 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -1253,7 +1253,7 @@ static int dsicm_probe(struct platform_device *pdev)
dsicm_hw_reset(ddata);
if (ddata->use_dsi_backlight) {
- memset(&props, 0, sizeof(struct backlight_properties));
+ memset(&props, 0, sizeof(props));
props.max_brightness = 255;
props.type = BACKLIGHT_RAW;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 746cb8d9cba1..5ab39e0060f2 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -909,6 +909,7 @@ static struct spi_driver acx565akm_driver = {
module_spi_driver(acx565akm_driver);
+MODULE_ALIAS("spi:sony,acx565akm");
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("acx565akm LCD Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index c839f6456db2..d956e6266368 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -620,6 +620,19 @@ u32 dispc_wb_get_framedone_irq(void)
return DISPC_IRQ_FRAMEDONEWB;
}
+void dispc_mgr_enable(enum omap_channel channel, bool enable)
+{
+ mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
+ /* flush posted write */
+ mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
+}
+EXPORT_SYMBOL(dispc_mgr_enable);
+
+static bool dispc_mgr_is_enabled(enum omap_channel channel)
+{
+ return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
+}
+
bool dispc_mgr_go_busy(enum omap_channel channel)
{
return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
@@ -2493,6 +2506,25 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
}
+ if (*decim_x > 4 && color_mode != OMAP_DSS_COLOR_NV12) {
+ /*
+ * Disable all scaling that requires a horizontal decimation
+ * factor higher than 4 until we have better estimates of what
+ * we can and cannot do. The NV12 color format, however,
+ * appears to work fine with all decimation factors.
+ *
+ * When decimating horizontally by more than 4, the DSS is not
+ * able to fetch the data in burst mode. When this happens it
+ * is hard to tell if there is enough bandwidth. Despite what
+ * theory says, this appears to be true also for 16-bit color
+ * formats.
+ */
+ DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)\n", *decim_x);
+
+ return -EINVAL;
+ }
+
*core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height,
out_width, out_height, mem_to_mem);
return 0;
@@ -2901,20 +2933,6 @@ enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channe
}
EXPORT_SYMBOL(dispc_mgr_get_supported_outputs);
-void dispc_mgr_enable(enum omap_channel channel, bool enable)
-{
- mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
- /* flush posted write */
- mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
-}
-EXPORT_SYMBOL(dispc_mgr_enable);
-
-bool dispc_mgr_is_enabled(enum omap_channel channel)
-{
- return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
-}
-EXPORT_SYMBOL(dispc_mgr_is_enabled);
-
void dispc_wb_enable(bool enable)
{
dispc_ovl_enable(OMAP_DSS_WB, enable);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index f060bda31235..f74615d005a8 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4336,7 +4336,7 @@ static void print_dsi_vm(const char *str,
wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
- bl = t->hss + t->hsa + t->hse + t->hbp + t->hfront_porch;
+ bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
tot = bl + pps;
#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
@@ -4345,14 +4345,14 @@ static void print_dsi_vm(const char *str,
"%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
str,
byteclk,
- t->hss, t->hsa, t->hse, t->hbp, pps, t->hfront_porch,
+ t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
bl, pps, tot,
TO_DSI_T(t->hss),
TO_DSI_T(t->hsa),
TO_DSI_T(t->hse),
TO_DSI_T(t->hbp),
TO_DSI_T(pps),
- TO_DSI_T(t->hfront_porch),
+ TO_DSI_T(t->hfp),
TO_DSI_T(bl),
TO_DSI_T(pps),
@@ -4367,7 +4367,7 @@ static void print_dispc_vm(const char *str, const struct videomode *vm)
int hact, bl, tot;
hact = vm->hactive;
- bl = vm->hsync_len + vm->hbp + vm->hfront_porch;
+ bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
tot = hact + bl;
#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
@@ -4376,10 +4376,10 @@ static void print_dispc_vm(const char *str, const struct videomode *vm)
"%u/%u/%u/%u = %u + %u = %u\n",
str,
pck,
- vm->hsync_len, vm->hbp, hact, vm->hfront_porch,
+ vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
bl, hact, tot,
TO_DISPC_T(vm->hsync_len),
- TO_DISPC_T(vm->hbp),
+ TO_DISPC_T(vm->hback_porch),
TO_DISPC_T(hact),
TO_DISPC_T(vm->hfront_porch),
TO_DISPC_T(bl),
@@ -4401,12 +4401,12 @@ static void print_dsi_dispc_vm(const char *str,
dsi_tput = (u64)byteclk * t->ndl * 8;
pck = (u32)div64_u64(dsi_tput, t->bitspp);
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
- dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfront_porch;
+ dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
vm.pixelclock = pck;
vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
- vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
- vm.hfront_porch = div64_u64((u64)t->hfront_porch * pck, byteclk);
+ vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
+ vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
vm.hactive = t->hact;
print_dispc_vm(str, &vm);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 136d30484d02..bf626acae271 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -119,8 +119,7 @@ static void __init omapdss_omapify_node(struct device_node *node)
static void __init omapdss_add_to_list(struct device_node *node, bool root)
{
- struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node),
- GFP_KERNEL);
+ struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL);
if (n) {
n->node = node;
n->root = root;
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index b420dde8c0fb..5b3b961127bd 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -856,7 +856,6 @@ int dispc_runtime_get(void);
void dispc_runtime_put(void);
void dispc_mgr_enable(enum omap_channel channel, bool enable);
-bool dispc_mgr_is_enabled(enum omap_channel channel);
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 2580e8673908..f90e2d22c5ec 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -162,7 +162,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
dssdrv->get_timings(dssdev, &t);
- if (memcmp(&vm, &t, sizeof(struct videomode)))
+ if (memcmp(&vm, &t, sizeof(vm)))
r = -EINVAL;
else
r = 0;
@@ -217,7 +217,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
omap_dss_get_device(dssdev);
- omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+ omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
if (!omap_connector)
goto fail;
@@ -240,8 +240,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_connector_register(connector);
-
return connector;
fail:
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 8dea89030e66..b68c70eb395f 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -36,26 +36,18 @@ struct omap_crtc {
struct videomode vm;
- struct omap_drm_irq vblank_irq;
- struct omap_drm_irq error_irq;
-
bool ignore_digit_sync_lost;
+ bool enabled;
bool pending;
wait_queue_head_t pending_wait;
+ struct drm_pending_vblank_event *event;
};
/* -----------------------------------------------------------------------------
* Helper Functions
*/
-uint32_t pipe2vbl(struct drm_crtc *crtc)
-{
- struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-
- return dispc_mgr_get_vsync_irq(omap_crtc->channel);
-}
-
struct videomode *omap_crtc_timings(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -68,6 +60,19 @@ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
return omap_crtc->channel;
}
+static bool omap_crtc_is_pending(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ unsigned long flags;
+ bool pending;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ pending = omap_crtc->pending;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ return pending;
+}
+
int omap_crtc_wait_pending(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -77,7 +82,7 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
* a single frame refresh even on slower displays.
*/
return wait_event_timeout(omap_crtc->pending_wait,
- !omap_crtc->pending,
+ !omap_crtc_is_pending(crtc),
msecs_to_jiffies(250));
}
@@ -135,14 +140,15 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
u32 framedone_irq, vsync_irq;
int ret;
+ if (WARN_ON(omap_crtc->enabled == enable))
+ return;
+
if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) {
dispc_mgr_enable(channel, enable);
+ omap_crtc->enabled = enable;
return;
}
- if (dispc_mgr_is_enabled(channel) == enable)
- return;
-
if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
/*
* Digit output produces some sync lost interrupts during the
@@ -173,6 +179,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
dispc_mgr_enable(channel, enable);
+ omap_crtc->enabled = enable;
ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
if (ret) {
@@ -259,26 +266,9 @@ static const struct dss_mgr_ops mgr_ops = {
* Setup, Flush and Page Flip
*/
-static void omap_crtc_complete_page_flip(struct drm_crtc *crtc)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = crtc->dev;
- unsigned long flags;
-
- event = crtc->state->event;
-
- if (!event)
- return;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus)
{
- struct omap_crtc *omap_crtc =
- container_of(irq, struct omap_crtc, error_irq);
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
if (omap_crtc->ignore_digit_sync_lost) {
irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
@@ -289,29 +279,38 @@ static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus);
}
-static void omap_crtc_vblank_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+void omap_crtc_vblank_irq(struct drm_crtc *crtc)
{
- struct omap_crtc *omap_crtc =
- container_of(irq, struct omap_crtc, vblank_irq);
- struct drm_device *dev = omap_crtc->base.dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ bool pending;
- if (dispc_mgr_go_busy(omap_crtc->channel))
+ spin_lock(&crtc->dev->event_lock);
+ /*
+ * If the dispc is busy we're racing the flush operation. Try again on
+ * the next vblank interrupt.
+ */
+ if (dispc_mgr_go_busy(omap_crtc->channel)) {
+ spin_unlock(&crtc->dev->event_lock);
return;
+ }
- DBG("%s: apply done", omap_crtc->name);
-
- __omap_irq_unregister(dev, &omap_crtc->vblank_irq);
+ /* Send the vblank event if one has been requested. */
+ if (omap_crtc->event) {
+ drm_crtc_send_vblank_event(crtc, omap_crtc->event);
+ omap_crtc->event = NULL;
+ }
- rmb();
- WARN_ON(!omap_crtc->pending);
+ pending = omap_crtc->pending;
omap_crtc->pending = false;
- wmb();
+ spin_unlock(&crtc->dev->event_lock);
- /* wake up userspace */
- omap_crtc_complete_page_flip(&omap_crtc->base);
+ if (pending)
+ drm_crtc_vblank_put(crtc);
- /* wake up omap_atomic_complete */
+ /* Wake up omap_atomic_complete. */
wake_up(&omap_crtc->pending_wait);
+
+ DBG("%s: apply done", omap_crtc->name);
}
/* -----------------------------------------------------------------------------
@@ -324,9 +323,6 @@ static void omap_crtc_destroy(struct drm_crtc *crtc)
DBG("%s", omap_crtc->name);
- WARN_ON(omap_crtc->vblank_irq.registered);
- omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
-
drm_crtc_cleanup(crtc);
kfree(omap_crtc);
@@ -335,17 +331,18 @@ static void omap_crtc_destroy(struct drm_crtc *crtc)
static void omap_crtc_enable(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ int ret;
DBG("%s", omap_crtc->name);
- rmb();
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_vblank_on(crtc);
+ ret = drm_crtc_vblank_get(crtc);
+ WARN_ON(ret != 0);
+
WARN_ON(omap_crtc->pending);
omap_crtc->pending = true;
- wmb();
-
- omap_irq_register(crtc->dev, &omap_crtc->vblank_irq);
-
- drm_crtc_vblank_on(crtc);
+ spin_unlock_irq(&crtc->dev->event_lock);
}
static void omap_crtc_disable(struct drm_crtc *crtc)
@@ -390,16 +387,15 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
}
static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_crtc_state *old_crtc_state)
{
}
static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_crtc_state *old_crtc_state)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-
- WARN_ON(omap_crtc->vblank_irq.registered);
+ int ret;
if (crtc->state->color_mgmt_changed) {
struct drm_color_lut *lut = NULL;
@@ -414,18 +410,24 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
}
- if (dispc_mgr_is_enabled(omap_crtc->channel)) {
+ /* Only flush the CRTC if it is currently enabled. */
+ if (!omap_crtc->enabled)
+ return;
+
+ DBG("%s: GO", omap_crtc->name);
- DBG("%s: GO", omap_crtc->name);
+ ret = drm_crtc_vblank_get(crtc);
+ WARN_ON(ret != 0);
- rmb();
- WARN_ON(omap_crtc->pending);
- omap_crtc->pending = true;
- wmb();
+ spin_lock_irq(&crtc->dev->event_lock);
+ dispc_mgr_go(omap_crtc->channel);
- dispc_mgr_go(omap_crtc->channel);
- omap_irq_register(crtc->dev, &omap_crtc->vblank_irq);
- }
+ WARN_ON(omap_crtc->pending);
+ omap_crtc->pending = true;
+
+ if (crtc->state->event)
+ omap_crtc->event = crtc->state->event;
+ spin_unlock_irq(&crtc->dev->event_lock);
}
static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
@@ -546,14 +548,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->channel = channel;
omap_crtc->name = channel_names[channel];
- omap_crtc->vblank_irq.irqmask = pipe2vbl(crtc);
- omap_crtc->vblank_irq.irq = omap_crtc_vblank_irq;
-
- omap_crtc->error_irq.irqmask =
- dispc_mgr_get_sync_lost_irq(channel);
- omap_crtc->error_irq.irq = omap_crtc_error_irq;
- omap_irq_register(dev, &omap_crtc->error_irq);
-
ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
&omap_crtc_funcs, NULL);
if (ret < 0) {
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 479bf24050f8..19b716745623 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -50,7 +50,11 @@ static int mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
+
+ return 0;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -119,13 +123,4 @@ int omap_debugfs_init(struct drm_minor *minor)
return ret;
}
-void omap_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(omap_debugfs_list,
- ARRAY_SIZE(omap_debugfs_list), minor);
- if (dmm_is_available())
- drm_debugfs_remove_files(omap_dmm_debugfs_list,
- ARRAY_SIZE(omap_dmm_debugfs_list), minor);
-}
-
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 4ceed7a9762f..3cab06661a08 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -224,7 +224,7 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
int rows = (1 + area->y1 - area->y0);
int i = columns*rows;
- pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
+ pat = alloc_dma(txn, sizeof(*pat), &pat_pa);
if (txn->last_pat)
txn->last_pat->next_pa = (uint32_t)pat_pa;
@@ -735,7 +735,7 @@ static int omap_dmm_probe(struct platform_device *dev)
/* alloc engines */
omap_dmm->engines = kcalloc(omap_dmm->num_engines,
- sizeof(struct refill_engine), GFP_KERNEL);
+ sizeof(*omap_dmm->engines), GFP_KERNEL);
if (!omap_dmm->engines) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index fdc83cbcde61..3f2554235225 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -96,8 +96,22 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
dispc_runtime_get();
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ /* With the current dss dispc implementation we have to enable
+ * the new modeset before we can commit planes. The dispc ovl
+ * configuration relies on the video mode configuration having
+ * been written into the HW when the ovl configuration is
+ * calculated.
+ *
+ * This approach is not ideal because after a mode change the
+ * plane update is executed only after the first vblank
+ * interrupt. The dispc implementation should be fixed so that
+ * it is able to use uncommitted drm state information.
+ */
drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ omap_atomic_wait_for_completion(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
omap_atomic_wait_for_completion(dev, old_state);
@@ -315,8 +329,6 @@ static int omap_modeset_init(struct drm_device *dev)
drm_mode_config_init(dev);
- omap_drm_irq_install(dev);
-
ret = omap_modeset_init_properties(dev);
if (ret < 0)
return ret;
@@ -489,12 +501,9 @@ static int omap_modeset_init(struct drm_device *dev)
drm_mode_config_reset(dev);
- return 0;
-}
+ omap_drm_irq_install(dev);
-static void omap_modeset_free(struct drm_device *dev)
-{
- drm_mode_config_cleanup(dev);
+ return 0;
}
/*
@@ -632,95 +641,6 @@ static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] =
* drm driver funcs
*/
-/**
- * load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
- *
- * The driver load routine has to do several things:
- * - initialize the memory manager
- * - allocate initial config memory
- * - setup the DRM framebuffer with the allocated memory
- */
-static int dev_load(struct drm_device *dev, unsigned long flags)
-{
- struct omap_drm_platform_data *pdata = dev->dev->platform_data;
- struct omap_drm_private *priv;
- unsigned int i;
- int ret;
-
- DBG("load: dev=%p", dev);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->omaprev = pdata->omaprev;
-
- dev->dev_private = priv;
-
- priv->wq = alloc_ordered_workqueue("omapdrm", 0);
- init_waitqueue_head(&priv->commit.wait);
- spin_lock_init(&priv->commit.lock);
-
- spin_lock_init(&priv->list_lock);
- INIT_LIST_HEAD(&priv->obj_list);
-
- omap_gem_init(dev);
-
- ret = omap_modeset_init(dev);
- if (ret) {
- dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
- dev->dev_private = NULL;
- kfree(priv);
- return ret;
- }
-
- /* Initialize vblank handling, start with all CRTCs disabled. */
- ret = drm_vblank_init(dev, priv->num_crtcs);
- if (ret)
- dev_warn(dev->dev, "could not init vblank\n");
-
- for (i = 0; i < priv->num_crtcs; i++)
- drm_crtc_vblank_off(priv->crtcs[i]);
-
- priv->fbdev = omap_fbdev_init(dev);
-
- /* store off drm_device for use in pm ops */
- dev_set_drvdata(dev->dev, dev);
-
- drm_kms_helper_poll_init(dev);
-
- return 0;
-}
-
-static int dev_unload(struct drm_device *dev)
-{
- struct omap_drm_private *priv = dev->dev_private;
-
- DBG("unload: dev=%p", dev);
-
- drm_kms_helper_poll_fini(dev);
-
- if (priv->fbdev)
- omap_fbdev_free(dev);
-
- omap_modeset_free(dev);
- omap_gem_deinit(dev);
-
- destroy_workqueue(priv->wq);
-
- drm_vblank_cleanup(dev);
- omap_drm_irq_uninstall(dev);
-
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-
- dev_set_drvdata(dev->dev, NULL);
-
- return 0;
-}
-
static int dev_open(struct drm_device *dev, struct drm_file *file)
{
file->driver_priv = NULL;
@@ -805,8 +725,6 @@ static const struct file_operations omapdriver_fops = {
static struct drm_driver omap_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .load = dev_load,
- .unload = dev_unload,
.open = dev_open,
.lastclose = dev_lastclose,
.get_vblank_counter = drm_vblank_no_hw_counter,
@@ -814,7 +732,6 @@ static struct drm_driver omap_drm_driver = {
.disable_vblank = omap_irq_disable_vblank,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = omap_debugfs_init,
- .debugfs_cleanup = omap_debugfs_cleanup,
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -836,30 +753,125 @@ static struct drm_driver omap_drm_driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
-static int pdev_probe(struct platform_device *device)
+static int pdev_probe(struct platform_device *pdev)
{
- int r;
+ struct omap_drm_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_drm_private *priv;
+ struct drm_device *ddev;
+ unsigned int i;
+ int ret;
+
+ DBG("%s", pdev->name);
if (omapdss_is_initialized() == false)
return -EPROBE_DEFER;
omap_crtc_pre_init();
- r = omap_connect_dssdevs();
- if (r) {
- omap_crtc_pre_uninit();
- return r;
+ ret = omap_connect_dssdevs();
+ if (ret)
+ goto err_crtc_uninit;
+
+ /* Allocate and initialize the driver private structure. */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_disconnect_dssdevs;
}
- DBG("%s", device->name);
- return drm_platform_init(&omap_drm_driver, device);
+ priv->omaprev = pdata->omaprev;
+ priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+
+ init_waitqueue_head(&priv->commit.wait);
+ spin_lock_init(&priv->commit.lock);
+ spin_lock_init(&priv->list_lock);
+ INIT_LIST_HEAD(&priv->obj_list);
+
+ /* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&omap_drm_driver, &pdev->dev);
+ if (IS_ERR(ddev)) {
+ ret = PTR_ERR(ddev);
+ goto err_free_priv;
+ }
+
+ ddev->dev_private = priv;
+ platform_set_drvdata(pdev, ddev);
+
+ omap_gem_init(ddev);
+
+ ret = omap_modeset_init(ddev);
+ if (ret) {
+ dev_err(&pdev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+ goto err_free_drm_dev;
+ }
+
+ /* Initialize vblank handling, start with all CRTCs disabled. */
+ ret = drm_vblank_init(ddev, priv->num_crtcs);
+ if (ret) {
+ dev_err(&pdev->dev, "could not init vblank\n");
+ goto err_cleanup_modeset;
+ }
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ drm_crtc_vblank_off(priv->crtcs[i]);
+
+ priv->fbdev = omap_fbdev_init(ddev);
+
+ drm_kms_helper_poll_init(ddev);
+
+ /*
+ * Register the DRM device with the core and the connectors with
+ * sysfs.
+ */
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto err_cleanup_helpers;
+
+ return 0;
+
+err_cleanup_helpers:
+ drm_kms_helper_poll_fini(ddev);
+ if (priv->fbdev)
+ omap_fbdev_free(ddev);
+err_cleanup_modeset:
+ drm_mode_config_cleanup(ddev);
+ omap_drm_irq_uninstall(ddev);
+err_free_drm_dev:
+ omap_gem_deinit(ddev);
+ drm_dev_unref(ddev);
+err_free_priv:
+ destroy_workqueue(priv->wq);
+ kfree(priv);
+err_disconnect_dssdevs:
+ omap_disconnect_dssdevs();
+err_crtc_uninit:
+ omap_crtc_pre_uninit();
+ return ret;
}
-static int pdev_remove(struct platform_device *device)
+static int pdev_remove(struct platform_device *pdev)
{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct omap_drm_private *priv = ddev->dev_private;
+
DBG("");
- drm_put_dev(platform_get_drvdata(device));
+ drm_dev_unregister(ddev);
+
+ drm_kms_helper_poll_fini(ddev);
+
+ if (priv->fbdev)
+ omap_fbdev_free(ddev);
+
+ drm_mode_config_cleanup(ddev);
+
+ omap_drm_irq_uninstall(ddev);
+ omap_gem_deinit(ddev);
+
+ drm_dev_unref(ddev);
+
+ destroy_workqueue(priv->wq);
+ kfree(priv);
omap_disconnect_dssdevs();
omap_crtc_pre_uninit();
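The two hunks above replace the deprecated .load()/.unload() drm_driver callbacks with an explicit drm_dev_alloc()/drm_dev_register() sequence in the platform probe path, which lets the driver fully initialize before the device becomes visible to userspace. Stripped of the omapdrm-specific steps, the pattern looks roughly like this (a sketch; the example_* names are illustrative):

static int example_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&example_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	platform_set_drvdata(pdev, ddev);

	/* hardware and KMS setup goes here, unwound on failure */

	ret = drm_dev_register(ddev, 0); /* device becomes visible here */
	if (ret)
		drm_dev_unref(ddev);

	return ret;
}

The qxl hunks later in this diff apply the same conversion for a PCI device.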
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 7d9dd5400cef..36d93ce84a29 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -48,19 +48,6 @@ struct omap_drm_window {
uint32_t src_w, src_h;
};
-/* For transiently registering for different DSS irqs that various parts
- * of the KMS code need during setup/configuration. We these are not
- * necessarily the same as what drm_vblank_get/put() are requesting, and
- * the hysteresis in drm_vblank_put() is not necessarily desirable for
- * internal housekeeping related irq usage.
- */
-struct omap_drm_irq {
- struct list_head node;
- uint32_t irqmask;
- bool registered;
- void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
-};
-
/* For KMS code that needs to wait for a certain # of IRQs:
*/
struct omap_irq_wait;
@@ -101,9 +88,9 @@ struct omap_drm_private {
struct drm_property *zorder_prop;
/* irq handling: */
- struct list_head irq_list; /* list of omap_drm_irq */
- uint32_t vblank_mask; /* irq bits set for userspace vblank */
- struct omap_drm_irq error_handler;
+ spinlock_t wait_lock; /* protects the wait_list */
+ struct list_head wait_list; /* list of omap_irq_wait */
+ uint32_t irq_mask; /* enabled irqs in addition to wait_list */
/* atomic commit */
struct {
@@ -116,7 +103,6 @@ struct omap_drm_private {
#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
-void omap_debugfs_cleanup(struct drm_minor *minor);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
@@ -128,10 +114,6 @@ int omap_gem_resume(struct device *dev);
int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe);
void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe);
-void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
-void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
-void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
-void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
void omap_drm_irq_uninstall(struct drm_device *dev);
int omap_drm_irq_install(struct drm_device *dev);
@@ -155,6 +137,8 @@ void omap_crtc_pre_uninit(void);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, enum omap_channel channel, int id);
int omap_crtc_wait_pending(struct drm_crtc *crtc);
+void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus);
+void omap_crtc_vblank_irq(struct drm_crtc *crtc);
struct drm_plane *omap_plane_init(struct drm_device *dev,
int id, enum drm_plane_type type,
@@ -233,32 +217,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
/* map crtc to vblank mask */
-uint32_t pipe2vbl(struct drm_crtc *crtc);
struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
-/* should these be made into common util helpers?
- */
-
-static inline int objects_lookup(
- struct drm_file *filp, uint32_t pixel_format,
- struct drm_gem_object **bos, const uint32_t *handles)
-{
- int i, n = drm_format_num_planes(pixel_format);
-
- for (i = 0; i < n; i++) {
- bos[i] = drm_gem_object_lookup(filp, handles[i]);
- if (!bos[i])
- goto fail;
-
- }
-
- return 0;
-
-fail:
- while (--i > 0)
- drm_gem_object_unreference_unlocked(bos[i]);
-
- return -ENOENT;
-}
-
#endif /* __OMAP_DRV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index a20f30039aee..86c977b7189a 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -117,7 +117,7 @@ static int omap_encoder_update(struct drm_encoder *encoder,
dssdrv->get_timings(dssdev, &t);
- if (memcmp(vm, &t, sizeof(struct videomode)))
+ if (memcmp(vm, &t, sizeof(*vm)))
ret = -EINVAL;
else
ret = 0;
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 5f3337f1e9aa..29dc677dd4d3 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -29,37 +29,30 @@
* framebuffer funcs
*/
-/* per-format info: */
-struct format {
+/* DSS to DRM formats mapping */
+static const struct {
enum omap_color_mode dss_format;
uint32_t pixel_format;
- struct {
- int stride_bpp; /* this times width is stride */
- int sub_y; /* sub-sample in y dimension */
- } planes[4];
- bool yuv;
-};
-
-static const struct format formats[] = {
+} formats[] = {
/* 16bpp [A]RGB: */
- { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565, {{2, 1}}, false }, /* RGB16-565 */
- { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */
- { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */
- { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */
- { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */
- { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */
- { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */
+ { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565 }, /* RGB16-565 */
+ { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444 }, /* RGB12x-4444 */
+ { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444 }, /* xRGB12-4444 */
+ { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444 }, /* RGBA12-4444 */
+ { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444 }, /* ARGB16-4444 */
+ { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555 }, /* xRGB15-1555 */
+ { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555 }, /* ARGB16-1555 */
/* 24bpp RGB: */
- { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888, {{3, 1}}, false }, /* RGB24-888 */
+ { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888 }, /* RGB24-888 */
/* 32bpp [A]RGB: */
- { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */
- { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */
- { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */
- { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */
+ { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888 }, /* RGBx24-8888 */
+ { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888 }, /* xRGB24-8888 */
+ { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888 }, /* RGBA32-8888 */
+ { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888 }, /* ARGB32-8888 */
/* YUV: */
- { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12, {{1, 1}, {1, 2}}, true },
- { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV, {{2, 1}}, true },
- { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true },
+ { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12 },
+ { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV },
+ { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY },
};
/* convert from overlay's pixel formats bitmask to an array of fourcc's */
@@ -89,8 +82,9 @@ struct plane {
struct omap_framebuffer {
struct drm_framebuffer base;
int pin_count;
- const struct format *format;
- struct plane planes[4];
+ const struct drm_format_info *format;
+ enum omap_color_mode dss_format;
+ struct plane planes[2];
/* lock for pinning (pin_count and planes.paddr) */
struct mutex lock;
};
@@ -107,7 +101,7 @@ static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = fb->format->num_planes;
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
@@ -128,13 +122,13 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
};
static uint32_t get_linear_addr(struct plane *plane,
- const struct format *format, int n, int x, int y)
+ const struct drm_format_info *format, int n, int x, int y)
{
uint32_t offset;
- offset = plane->offset +
- (x * format->planes[n].stride_bpp) +
- (y * plane->pitch / format->planes[n].sub_y);
+ offset = plane->offset
+ + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
+ + (y * plane->pitch / (n == 0 ? 1 : format->vsub));
return plane->paddr + offset;
}
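As a concrete check of the new arithmetic: drm_format_info(DRM_FORMAT_NV12) reports cpp = {1, 2} with hsub = vsub = 2, so for the chroma plane (n == 1) the x term contributes x * 2 / 2 = x bytes and the y term y * pitch / 2, matching the stride_bpp = 1, sub_y = 2 table entry that the previous hunk removed. A stand-alone sanity check (the WARN_ONs are illustrative, not part of the patch):

	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	WARN_ON(info->cpp[0] != 1);			/* 1 byte/pixel luma */
	WARN_ON(info->cpp[1] != 2);			/* interleaved CbCr */
	WARN_ON(info->hsub != 2 || info->vsub != 2);	/* 2x2 subsampling */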
@@ -153,11 +147,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- const struct format *format = omap_fb->format;
+ const struct drm_format_info *format = omap_fb->format;
struct plane *plane = &omap_fb->planes[0];
uint32_t x, y, orient = 0;
- info->color_mode = format->dss_format;
+ info->color_mode = omap_fb->dss_format;
info->pos_x = win->crtc_x;
info->pos_y = win->crtc_y;
@@ -231,9 +225,9 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
}
/* convert to pixels: */
- info->screen_width /= format->planes[0].stride_bpp;
+ info->screen_width /= format->cpp[0];
- if (format->dss_format == OMAP_DSS_COLOR_NV12) {
+ if (omap_fb->dss_format == OMAP_DSS_COLOR_NV12) {
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
@@ -252,7 +246,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
int omap_framebuffer_pin(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ int ret, i, n = fb->format->num_planes;
mutex_lock(&omap_fb->lock);
@@ -292,7 +286,7 @@ fail:
void omap_framebuffer_unpin(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = fb->format->num_planes;
mutex_lock(&omap_fb->lock);
@@ -343,10 +337,10 @@ struct drm_connector *omap_framebuffer_get_next_connector(
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
- (char *)&fb->pixel_format);
+ (char *)&fb->format->format);
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
@@ -360,47 +354,58 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ unsigned int num_planes = drm_format_num_planes(mode_cmd->pixel_format);
struct drm_gem_object *bos[4];
struct drm_framebuffer *fb;
- int ret;
+ int i;
- ret = objects_lookup(file, mode_cmd->pixel_format,
- bos, mode_cmd->handles);
- if (ret)
- return ERR_PTR(ret);
+ for (i = 0; i < num_planes; i++) {
+ bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
+ if (!bos[i]) {
+ fb = ERR_PTR(-ENOENT);
+ goto error;
+ }
+ }
fb = omap_framebuffer_init(dev, mode_cmd, bos);
- if (IS_ERR(fb)) {
- int i, n = drm_format_num_planes(mode_cmd->pixel_format);
- for (i = 0; i < n; i++)
- drm_gem_object_unreference_unlocked(bos[i]);
- return fb;
- }
+ if (IS_ERR(fb))
+ goto error;
+
+ return fb;
+
+error:
+ while (--i >= 0)
+ drm_gem_object_unreference_unlocked(bos[i]);
+
return fb;
}
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
+ const struct drm_format_info *format = NULL;
struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
- const struct format *format = NULL;
- int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+ enum omap_color_mode dss_format = 0;
+ unsigned int pitch = mode_cmd->pitches[0];
+ int ret, i;
DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
+ format = drm_format_info(mode_cmd->pixel_format);
+
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i].pixel_format == mode_cmd->pixel_format) {
- format = &formats[i];
+ dss_format = formats[i].dss_format;
break;
}
}
- if (!format) {
- dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
- (char *)&mode_cmd->pixel_format);
+ if (!format || !dss_format) {
+ dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
+ (char *)&mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
}
@@ -413,40 +418,39 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
fb = &omap_fb->base;
omap_fb->format = format;
+ omap_fb->dss_format = dss_format;
mutex_init(&omap_fb->lock);
- for (i = 0; i < n; i++) {
- struct plane *plane = &omap_fb->planes[i];
- int size, pitch = mode_cmd->pitches[i];
-
- if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
- dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
- pitch, mode_cmd->width * format->planes[i].stride_bpp);
- ret = -EINVAL;
- goto fail;
- }
+ /*
+ * The code below assumes that no format uses more than two planes, and
+ * that the two planes of multiplane formats need the same number of
+ * bytes per pixel.
+ */
+ if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
+ dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
+ ret = -EINVAL;
+ goto fail;
+ }
- if (pitch % format->planes[i].stride_bpp != 0) {
- dev_err(dev->dev,
- "buffer pitch (%d bytes) is not a multiple of pixel size (%d bytes)\n",
- pitch, format->planes[i].stride_bpp);
- ret = -EINVAL;
- goto fail;
- }
+ if (pitch % format->cpp[0]) {
+ dev_dbg(dev->dev,
+ "buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
+ pitch, format->cpp[0]);
+ ret = -EINVAL;
+ goto fail;
+ }
- size = pitch * mode_cmd->height / format->planes[i].sub_y;
+ for (i = 0; i < format->num_planes; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ unsigned int vsub = i == 0 ? 1 : format->vsub;
+ unsigned int size;
- if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
- dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
- bos[i]->size - mode_cmd->offsets[i], size);
- ret = -EINVAL;
- goto fail;
- }
+ size = pitch * mode_cmd->height / vsub;
- if (i > 0 && pitch != mode_cmd->pitches[i - 1]) {
- dev_err(dev->dev,
- "pitches are not the same between framebuffer planes %d != %d\n",
- pitch, mode_cmd->pitches[i - 1]);
+ if (size > omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i]) {
+ dev_dbg(dev->dev,
+ "provided buffer object is too small! %d < %d\n",
+ bos[i]->size - mode_cmd->offsets[i], size);
ret = -EINVAL;
goto fail;
}
@@ -457,7 +461,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->paddr = 0;
}
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 8d8ac173f55d..942c4d483008 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -190,7 +190,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
strcpy(fbi->fix.id, MODULE_NAME);
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
dev->mode_config.fb_base = paddr;
@@ -225,10 +225,8 @@ fail:
drm_fb_helper_release_fbi(helper);
- if (fb) {
- drm_framebuffer_unregister_private(fb);
+ if (fb)
drm_framebuffer_remove(fb);
- }
}
return ret;
@@ -265,8 +263,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper,
- priv->num_crtcs, priv->num_connectors);
+ ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
if (ret) {
dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
@@ -314,10 +311,8 @@ void omap_fbdev_free(struct drm_device *dev)
omap_gem_put_paddr(fbdev->bo);
/* this will free the backing object */
- if (fbdev->fb) {
- drm_framebuffer_unregister_private(fbdev->fb);
+ if (fbdev->fb)
drm_framebuffer_remove(fbdev->fb);
- }
kfree(fbdev);
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 60e1e8016708..9adfa7c99695 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -19,25 +19,24 @@
#include "omap_drv.h"
-static DEFINE_SPINLOCK(list_lock);
-
-static void omap_irq_error_handler(struct omap_drm_irq *irq,
- uint32_t irqstatus)
-{
- DRM_ERROR("errors: %08x\n", irqstatus);
-}
+struct omap_irq_wait {
+ struct list_head node;
+ wait_queue_head_t wq;
+ uint32_t irqmask;
+ int count;
+};
-/* call with list_lock and dispc runtime held */
+/* call with wait_lock and dispc runtime held */
static void omap_irq_update(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- struct omap_drm_irq *irq;
- uint32_t irqmask = priv->vblank_mask;
+ struct omap_irq_wait *wait;
+ uint32_t irqmask = priv->irq_mask;
- assert_spin_locked(&list_lock);
+ assert_spin_locked(&priv->wait_lock);
- list_for_each_entry(irq, &priv->irq_list, node)
- irqmask |= irq->irqmask;
+ list_for_each_entry(wait, &priv->wait_list, node)
+ irqmask |= wait->irqmask;
DBG("irqmask=%08x", irqmask);
@@ -45,90 +44,48 @@ static void omap_irq_update(struct drm_device *dev)
dispc_read_irqenable(); /* flush posted write */
}
-void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
-{
- struct omap_drm_private *priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&list_lock, flags);
-
- if (!WARN_ON(irq->registered)) {
- irq->registered = true;
- list_add(&irq->node, &priv->irq_list);
- omap_irq_update(dev);
- }
-
- spin_unlock_irqrestore(&list_lock, flags);
-}
-
-void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
-{
- dispc_runtime_get();
-
- __omap_irq_register(dev, irq);
-
- dispc_runtime_put();
-}
-
-void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+static void omap_irq_wait_handler(struct omap_irq_wait *wait)
{
- unsigned long flags;
-
- spin_lock_irqsave(&list_lock, flags);
-
- if (!WARN_ON(!irq->registered)) {
- irq->registered = false;
- list_del(&irq->node);
- omap_irq_update(dev);
- }
-
- spin_unlock_irqrestore(&list_lock, flags);
-}
-
-void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
-{
- dispc_runtime_get();
-
- __omap_irq_unregister(dev, irq);
-
- dispc_runtime_put();
-}
-
-struct omap_irq_wait {
- struct omap_drm_irq irq;
- int count;
-};
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_event);
-
-static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
- struct omap_irq_wait *wait =
- container_of(irq, struct omap_irq_wait, irq);
wait->count--;
- wake_up_all(&wait_event);
+ wake_up(&wait->wq);
}
struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
uint32_t irqmask, int count)
{
+ struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
- wait->irq.irq = wait_irq;
- wait->irq.irqmask = irqmask;
+ unsigned long flags;
+
+ init_waitqueue_head(&wait->wq);
+ wait->irqmask = irqmask;
wait->count = count;
- omap_irq_register(dev, &wait->irq);
+
+ spin_lock_irqsave(&priv->wait_lock, flags);
+ list_add(&wait->node, &priv->wait_list);
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&priv->wait_lock, flags);
+
return wait;
}
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
unsigned long timeout)
{
- int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
- omap_irq_unregister(dev, &wait->irq);
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+ int ret;
+
+ ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);
+
+ spin_lock_irqsave(&priv->wait_lock, flags);
+ list_del(&wait->node);
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&priv->wait_lock, flags);
+
kfree(wait);
- if (ret == 0)
- return -1;
- return 0;
+
+ return ret == 0 ? -1 : 0;
}
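Callers of the wait API are unaffected by the rework: a waiter is still allocated with a mask and a count, then blocked on, and omap_irq_wait() frees it in both outcomes. Typical usage (a sketch; the mask and timeout are illustrative):

	struct omap_irq_wait *wait;

	/* wait for one FRAMEDONE interrupt, give up after 50 ms */
	wait = omap_irq_wait_init(dev, DISPC_IRQ_FRAMEDONE, 1);
	if (omap_irq_wait(dev, wait, msecs_to_jiffies(50)))
		dev_err(dev->dev, "framedone timeout\n");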
/**
@@ -152,10 +109,10 @@ int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe)
DBG("dev=%p, crtc=%u", dev, pipe);
- spin_lock_irqsave(&list_lock, flags);
- priv->vblank_mask |= pipe2vbl(crtc);
+ spin_lock_irqsave(&priv->wait_lock, flags);
+ priv->irq_mask |= dispc_mgr_get_vsync_irq(omap_crtc_channel(crtc));
omap_irq_update(dev);
- spin_unlock_irqrestore(&list_lock, flags);
+ spin_unlock_irqrestore(&priv->wait_lock, flags);
return 0;
}
@@ -177,17 +134,66 @@ void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe)
DBG("dev=%p, crtc=%u", dev, pipe);
- spin_lock_irqsave(&list_lock, flags);
- priv->vblank_mask &= ~pipe2vbl(crtc);
+ spin_lock_irqsave(&priv->wait_lock, flags);
+ priv->irq_mask &= ~dispc_mgr_get_vsync_irq(omap_crtc_channel(crtc));
omap_irq_update(dev);
- spin_unlock_irqrestore(&list_lock, flags);
+ spin_unlock_irqrestore(&priv->wait_lock, flags);
+}
+
+static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
+ u32 irqstatus)
+{
+ static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ static const struct {
+ const char *name;
+ u32 mask;
+ } sources[] = {
+ { "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
+ { "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
+ { "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
+ { "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
+ };
+
+ const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
+ | DISPC_IRQ_VID1_FIFO_UNDERFLOW
+ | DISPC_IRQ_VID2_FIFO_UNDERFLOW
+ | DISPC_IRQ_VID3_FIFO_UNDERFLOW;
+ unsigned int i;
+
+ spin_lock(&priv->wait_lock);
+ irqstatus &= priv->irq_mask & mask;
+ spin_unlock(&priv->wait_lock);
+
+ if (!irqstatus)
+ return;
+
+ if (!__ratelimit(&_rs))
+ return;
+
+ DRM_ERROR("FIFO underflow on ");
+
+ for (i = 0; i < ARRAY_SIZE(sources); ++i) {
+ if (sources[i].mask & irqstatus)
+ pr_cont("%s ", sources[i].name);
+ }
+
+ pr_cont("(0x%08x)\n", irqstatus);
+}
+
+static void omap_irq_ocp_error_handler(u32 irqstatus)
+{
+ if (!(irqstatus & DISPC_IRQ_OCP_ERR))
+ return;
+
+ DRM_ERROR("OCP error\n");
}
static irqreturn_t omap_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct omap_drm_private *priv = dev->dev_private;
- struct omap_drm_irq *handler, *n;
+ struct omap_irq_wait *wait, *n;
unsigned long flags;
unsigned int id;
u32 irqstatus;
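The omap_irq_fifo_underflow() function added above throttles its reporting with a static DEFINE_RATELIMIT_STATE and an early return when __ratelimit() denies the message; this is the standard pattern for any interrupt-context path that could otherwise flood the log. In isolation (a sketch):

static void report_noisy_event(u32 status)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!__ratelimit(&rs))
		return;	/* drop the message, not the event */

	pr_err("noisy event: 0x%08x\n", status);
}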
@@ -200,24 +206,37 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
for (id = 0; id < priv->num_crtcs; id++) {
struct drm_crtc *crtc = priv->crtcs[id];
+ enum omap_channel channel = omap_crtc_channel(crtc);
- if (irqstatus & pipe2vbl(crtc))
+ if (irqstatus & dispc_mgr_get_vsync_irq(channel)) {
drm_handle_vblank(dev, id);
+ omap_crtc_vblank_irq(crtc);
+ }
+
+ if (irqstatus & dispc_mgr_get_sync_lost_irq(channel))
+ omap_crtc_error_irq(crtc, irqstatus);
}
- spin_lock_irqsave(&list_lock, flags);
- list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
- if (handler->irqmask & irqstatus) {
- spin_unlock_irqrestore(&list_lock, flags);
- handler->irq(handler, handler->irqmask & irqstatus);
- spin_lock_irqsave(&list_lock, flags);
- }
+ omap_irq_ocp_error_handler(irqstatus);
+ omap_irq_fifo_underflow(priv, irqstatus);
+
+ spin_lock_irqsave(&priv->wait_lock, flags);
+ list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
+ if (wait->irqmask & irqstatus)
+ omap_irq_wait_handler(wait);
}
- spin_unlock_irqrestore(&list_lock, flags);
+ spin_unlock_irqrestore(&priv->wait_lock, flags);
return IRQ_HANDLED;
}
+static const u32 omap_underflow_irqs[] = {
+ [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+};
+
/*
* We need a special version, instead of just using drm_irq_install(),
* because we need to register the irq via omapdss. Once omapdss and
@@ -228,10 +247,25 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
int omap_drm_irq_install(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- struct omap_drm_irq *error_handler = &priv->error_handler;
+ unsigned int num_mgrs = dss_feat_get_num_mgrs();
+ unsigned int max_planes;
+ unsigned int i;
int ret;
- INIT_LIST_HEAD(&priv->irq_list);
+ spin_lock_init(&priv->wait_lock);
+ INIT_LIST_HEAD(&priv->wait_list);
+
+ priv->irq_mask = DISPC_IRQ_OCP_ERR;
+
+ max_planes = min(ARRAY_SIZE(priv->planes),
+ ARRAY_SIZE(omap_underflow_irqs));
+ for (i = 0; i < max_planes; ++i) {
+ if (priv->planes[i])
+ priv->irq_mask |= omap_underflow_irqs[i];
+ }
+
+ for (i = 0; i < num_mgrs; ++i)
+ priv->irq_mask |= dispc_mgr_get_sync_lost_irq(i);
dispc_runtime_get();
dispc_clear_irqstatus(0xffffffff);
@@ -241,16 +275,6 @@ int omap_drm_irq_install(struct drm_device *dev)
if (ret < 0)
return ret;
- error_handler->irq = omap_irq_error_handler;
- error_handler->irqmask = DISPC_IRQ_OCP_ERR;
-
- /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think
- * we just need to ignore it while enabling tv-out
- */
- error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
-
- omap_irq_register(dev, error_handler);
-
dev->irq_enabled = true;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 82b2c23d6769..386d90af70f7 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -43,8 +43,6 @@ struct omap_plane {
uint32_t nformats;
uint32_t formats[32];
-
- struct omap_drm_irq error_irq;
};
struct omap_plane_state {
@@ -204,8 +202,6 @@ static void omap_plane_destroy(struct drm_plane *plane)
DBG("%s", omap_plane->name);
- omap_irq_unregister(plane->dev, &omap_plane->error_irq);
-
drm_plane_cleanup(plane);
kfree(omap_plane);
@@ -332,14 +328,6 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.atomic_get_property = omap_plane_atomic_get_property,
};
-static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
- struct omap_plane *omap_plane =
- container_of(irq, struct omap_plane, error_irq);
- DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name,
- irqstatus);
-}
-
static const char *plane_names[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
@@ -347,13 +335,6 @@ static const char *plane_names[] = {
[OMAP_DSS_VIDEO3] = "vid3",
};
-static const uint32_t error_irqs[] = {
- [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
- [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
-};
-
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
int id, enum drm_plane_type type,
@@ -377,10 +358,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
plane = &omap_plane->base;
- omap_plane->error_irq.irqmask = error_irqs[id];
- omap_plane->error_irq.irq = omap_plane_error_irq;
- omap_irq_register(dev, &omap_plane->error_irq);
-
ret = drm_universal_plane_init(dev, plane, possible_crtcs,
&omap_plane_funcs, omap_plane->formats,
omap_plane->nformats, type, NULL);
@@ -394,7 +371,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
return plane;
error:
- omap_irq_unregister(plane->dev, &omap_plane->error_irq);
kfree(omap_plane);
return NULL;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 06aaf79de8c8..89eb0422821c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -668,6 +668,48 @@ static const struct panel_desc avic_tm070ddh03 = {
},
};
+static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
+ {
+ .clock = 71900,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 5,
+ .vtotal = 800 + 3 + 5 + 24,
+ .vrefresh = 60,
+ },
+ {
+ .clock = 57500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 5,
+ .vtotal = 800 + 3 + 5 + 24,
+ .vrefresh = 48,
+ },
+};
+
+static const struct panel_desc boe_nv101wxmn51 = {
+ .modes = boe_nv101wxmn51_modes,
+ .num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes),
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+};
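The .vrefresh values in the new mode table can be cross-checked from the timings, since refresh = clock * 1000 / (htotal * vtotal). A stand-alone check (userspace C, not part of the patch):

#include <stdio.h>

int main(void)
{
	const double htotal = 1280 + 48 + 32 + 80;	/* 1440 */
	const double vtotal = 800 + 3 + 5 + 24;		/* 832 */

	printf("%.2f Hz\n", 71900e3 / (htotal * vtotal));	/* ~60.01 */
	printf("%.2f Hz\n", 57500e3 / (htotal * vtotal));	/* ~47.99 */
	return 0;
}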
+
static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
.clock = 66770,
.hdisplay = 800,
@@ -760,6 +802,8 @@ static const struct panel_desc edt_et057090dhu = {
.width = 115,
.height = 86,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
static const struct drm_display_mode edt_etm0700g0dh6_mode = {
@@ -784,6 +828,8 @@ static const struct panel_desc edt_etm0700g0dh6 = {
.width = 152,
.height = 91,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
@@ -1277,6 +1323,29 @@ static const struct panel_desc nec_nl4827hc19_05b = {
.bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
};
+static const struct drm_display_mode netron_dy_e231732_mode = {
+ .clock = 66000,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 160,
+ .hsync_end = 1024 + 160 + 70,
+ .htotal = 1024 + 160 + 70 + 90,
+ .vdisplay = 600,
+ .vsync_start = 600 + 127,
+ .vsync_end = 600 + 127 + 20,
+ .vtotal = 600 + 127 + 20 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc netron_dy_e231732 = {
+ .modes = &netron_dy_e231732_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 154,
+ .height = 87,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode nvd_9128_mode = {
.clock = 29500,
.hdisplay = 800,
@@ -1632,6 +1701,30 @@ static const struct panel_desc starry_kr122ea0sra = {
},
};
+static const struct display_timing tianma_tm070jdhg30_timing = {
+ .pixelclock = { 62600000, 68200000, 78100000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 15, 64, 159 },
+ .hback_porch = { 5, 5, 5 },
+ .hsync_len = { 1, 1, 256 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 3, 40, 99 },
+ .vback_porch = { 2, 2, 2 },
+ .vsync_len = { 1, 1, 128 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc tianma_tm070jdhg30 = {
+ .timings = &tianma_tm070jdhg30_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 151,
+ .height = 95,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct drm_display_mode tpk_f07a_0102_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -1748,6 +1841,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "avic,tm070ddh03",
.data = &avic_tm070ddh03,
}, {
+ .compatible = "boe,nv101wxmn51",
+ .data = &boe_nv101wxmn51,
+ }, {
.compatible = "chunghwa,claa070wp03xg",
.data = &chunghwa_claa070wp03xg,
}, {
@@ -1826,6 +1922,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "nec,nl4827hc19-05b",
.data = &nec_nl4827hc19_05b,
}, {
+ .compatible = "netron-dy,e231732",
+ .data = &netron_dy_e231732,
+ }, {
.compatible = "nvd,9128",
.data = &nvd_9128,
}, {
@@ -1868,6 +1967,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "starry,kr122ea0sra",
.data = &starry_kr122ea0sra,
}, {
+ .compatible = "tianma,tm070jdhg30",
+ .data = &tianma_tm070jdhg30,
+ }, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
}, {
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index da45b11b66b8..378da5918e6c 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,6 +1,6 @@
config DRM_QXL
tristate "QXL virtual GPU"
- depends on DRM && PCI
+ depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
select CRC32
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 241af9131dc8..d58751c94618 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -84,8 +84,18 @@ int
qxl_debugfs_init(struct drm_minor *minor)
{
#if defined(CONFIG_DEBUG_FS)
+ int r;
+ struct qxl_device *dev =
+ (struct qxl_device *) minor->dev->dev_private;
+
drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
+
+ r = qxl_ttm_debugfs_init(dev);
+ if (r) {
+ DRM_ERROR("Failed to init TTM debugfs\n");
+ return r;
+ }
#endif
return 0;
}
@@ -123,8 +133,8 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
qdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- qdev->ddev->primary->debugfs_root,
- qdev->ddev->primary);
+ qdev->ddev.primary->debugfs_root,
+ qdev->ddev.primary);
#endif
return 0;
}
@@ -137,7 +147,7 @@ void qxl_debugfs_remove_files(struct qxl_device *qdev)
for (i = 0; i < qdev->debugfs_count; i++) {
drm_debugfs_remove_files(qdev->debugfs[i].files,
qdev->debugfs[i].num_files,
- qdev->ddev->primary);
+ qdev->ddev.primary);
}
#endif
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 4b5eab8a47b3..1094cd33eb06 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -136,7 +136,7 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
static void qxl_update_offset_props(struct qxl_device *qdev)
{
- struct drm_device *dev = qdev->ddev;
+ struct drm_device *dev = &qdev->ddev;
struct drm_connector *connector;
struct qxl_output *output;
struct qxl_head *head;
@@ -156,7 +156,7 @@ static void qxl_update_offset_props(struct qxl_device *qdev)
void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
- struct drm_device *dev = qdev->ddev;
+ struct drm_device *dev = &qdev->ddev;
int status;
status = qxl_display_copy_rom_client_monitors_config(qdev);
@@ -174,10 +174,10 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
drm_modeset_lock_all(dev);
qxl_update_offset_props(qdev);
drm_modeset_unlock_all(dev);
- if (!drm_helper_hpd_irq_event(qdev->ddev)) {
+ if (!drm_helper_hpd_irq_event(dev)) {
/* notify that the monitor configuration changed, to
adjust at the arbitrary resolution */
- drm_kms_helper_hotplug_event(qdev->ddev);
+ drm_kms_helper_hotplug_event(dev);
}
}
@@ -624,12 +624,12 @@ qxl_framebuffer_init(struct drm_device *dev,
int ret;
qfb->obj = obj;
+ drm_helper_mode_fill_fb_struct(dev, &qfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &qfb->base, funcs);
if (ret) {
qfb->obj = NULL;
return ret;
}
- drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
return 0;
}
@@ -1036,7 +1036,7 @@ static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
return 0;
qdev->hotplug_mode_update_property =
- drm_property_create_range(qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
+ drm_property_create_range(&qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
"hotplug_mode_update", 0, 1);
return 0;
@@ -1077,7 +1077,6 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- drm_connector_register(connector);
return 0;
}
@@ -1176,28 +1175,28 @@ int qxl_modeset_init(struct qxl_device *qdev)
int i;
int ret;
- drm_mode_config_init(qdev->ddev);
+ drm_mode_config_init(&qdev->ddev);
ret = qxl_create_monitors_object(qdev);
if (ret)
return ret;
- qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
+ qdev->ddev.mode_config.funcs = (void *)&qxl_mode_funcs;
/* modes will be validated against the framebuffer size */
- qdev->ddev->mode_config.min_width = 320;
- qdev->ddev->mode_config.min_height = 200;
- qdev->ddev->mode_config.max_width = 8192;
- qdev->ddev->mode_config.max_height = 8192;
+ qdev->ddev.mode_config.min_width = 320;
+ qdev->ddev.mode_config.min_height = 200;
+ qdev->ddev.mode_config.max_width = 8192;
+ qdev->ddev.mode_config.max_height = 8192;
- qdev->ddev->mode_config.fb_base = qdev->vram_base;
+ qdev->ddev.mode_config.fb_base = qdev->vram_base;
- drm_mode_create_suggested_offset_properties(qdev->ddev);
+ drm_mode_create_suggested_offset_properties(&qdev->ddev);
qxl_mode_create_hotplug_mode_update_property(qdev);
for (i = 0 ; i < qxl_num_crtc; ++i) {
- qdev_crtc_init(qdev->ddev, i);
- qdev_output_init(qdev->ddev, i);
+ qdev_crtc_init(&qdev->ddev, i);
+ qdev_output_init(&qdev->ddev, i);
}
qdev->mode_info.mode_config_initialized = true;
@@ -1215,7 +1214,7 @@ void qxl_modeset_fini(struct qxl_device *qdev)
qxl_destroy_monitors_object(qdev);
if (qdev->mode_info.mode_config_initialized) {
- drm_mode_config_cleanup(qdev->ddev);
+ drm_mode_config_cleanup(&qdev->ddev);
qdev->mode_info.mode_config_initialized = false;
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 9b728edf1b49..4d8681e84e68 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -283,7 +283,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct qxl_rect *rects;
int stride = qxl_fb->base.pitches[0];
/* depth is not actually interesting, we don't mask with it */
- int depth = qxl_fb->base.bits_per_pixel;
+ int depth = qxl_fb->base.format->cpp[0] * 8;
uint8_t *surface_base;
struct qxl_release *release;
struct qxl_bo *clips_bo;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 460bbceae297..8e17c241e63c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -62,20 +62,71 @@ static struct pci_driver qxl_pci_driver;
static int
qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct qxl_device *qdev;
+ int ret;
+
if (pdev->revision < 4) {
DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
" use xf86-video-qxl in user mode");
return -EINVAL; /* TODO: ENODEV ? */
}
- return drm_get_pci_dev(pdev, ent, &qxl_driver);
+
+ qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
+ if (!qdev)
+ return -ENOMEM;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto free_dev;
+
+ ret = qxl_device_init(qdev, &qxl_driver, pdev, ent->driver_data);
+ if (ret)
+ goto disable_pci;
+
+ ret = drm_vblank_init(&qdev->ddev, 1);
+ if (ret)
+ goto unload;
+
+ ret = qxl_modeset_init(qdev);
+ if (ret)
+ goto vblank_cleanup;
+
+ drm_kms_helper_poll_init(&qdev->ddev);
+
+ /* Complete initialization. */
+ ret = drm_dev_register(&qdev->ddev, ent->driver_data);
+ if (ret)
+ goto modeset_cleanup;
+
+ return 0;
+
+modeset_cleanup:
+ qxl_modeset_fini(qdev);
+vblank_cleanup:
+ drm_vblank_cleanup(&qdev->ddev);
+unload:
+ qxl_device_fini(qdev);
+disable_pci:
+ pci_disable_device(pdev);
+free_dev:
+ kfree(qdev);
+ return ret;
}
static void
qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct qxl_device *qdev = dev->dev_private;
+
+ drm_dev_unregister(dev);
+
+ qxl_modeset_fini(qdev);
+ qxl_device_fini(qdev);
- drm_put_dev(dev);
+ dev->dev_private = NULL;
+ kfree(qdev);
+ drm_dev_unref(dev);
}
static const struct file_operations qxl_fops = {
@@ -230,8 +281,6 @@ static struct pci_driver qxl_pci_driver = {
static struct drm_driver qxl_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
- .load = qxl_driver_load,
- .unload = qxl_driver_unload,
.get_vblank_counter = qxl_noop_get_vblank_counter,
.enable_vblank = qxl_noop_enable_vblank,
.disable_vblank = qxl_noop_disable_vblank,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 785aad42e9bb..785c17b56f73 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -43,6 +43,7 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
/* just for ttm_validate_buffer */
@@ -241,9 +242,7 @@ void qxl_debugfs_remove_files(struct qxl_device *qdev);
struct qxl_device;
struct qxl_device {
- struct device *dev;
- struct drm_device *ddev;
- struct pci_dev *pdev;
+ struct drm_device ddev;
unsigned long flags;
resource_size_t vram_base, vram_size;
@@ -335,8 +334,9 @@ __printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;
-int qxl_driver_load(struct drm_device *dev, unsigned long flags);
-int qxl_driver_unload(struct drm_device *dev);
+int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
+ struct pci_dev *pdev, unsigned long flags);
+void qxl_device_fini(struct qxl_device *qdev);
int qxl_modeset_init(struct qxl_device *qdev);
void qxl_modeset_fini(struct qxl_device *qdev);
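With struct drm_device now embedded in struct qxl_device rather than pointed to, the wrapper can be recovered from the embedded member without a dev_private round-trip. The patch keeps dev_private populated for existing callers, but the embedded layout also permits the usual container_of() idiom (a sketch, not part of the patch):

static inline struct qxl_device *to_qxl(struct drm_device *ddev)
{
	return container_of(ddev, struct qxl_device, ddev);
}

The reverse direction is a plain member access: &qdev->ddev.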
@@ -530,6 +530,7 @@ int qxl_garbage_collect(struct qxl_device *qdev);
int qxl_debugfs_init(struct drm_minor *minor);
void qxl_debugfs_takedown(struct drm_minor *minor);
+int qxl_ttm_debugfs_init(struct qxl_device *qdev);
/* qxl_prime.c */
int qxl_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index fd7e5e94be5b..d479b7a7abe4 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -268,7 +268,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
info->par = qfbdev;
- qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
+ qxl_framebuffer_init(&qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
&qxlfb_fb_funcs);
fb = &qfbdev->qfb.base;
@@ -279,7 +279,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
qfbdev->shadow = shadow;
strcpy(info->fix.id, "qxldrmfb");
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
info->fbops = &qxlfb_ops;
@@ -297,7 +297,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].base = qdev->ddev.mode_config.fb_base;
info->apertures->ranges[0].size = qdev->vram_size;
info->fix.mmio_start = 0;
@@ -316,7 +316,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
qdev->fbdev_info = info;
qdev->fbdev_qfb = &qfbdev->qfb;
DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
- DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
+ DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n",
+ fb->format->depth, fb->pitches[0], fb->width, fb->height);
return 0;
out_destroy_fbi:
@@ -394,11 +395,10 @@ int qxl_fbdev_init(struct qxl_device *qdev)
spin_lock_init(&qfbdev->delayed_ops_lock);
INIT_LIST_HEAD(&qfbdev->delayed_ops);
- drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
+ drm_fb_helper_prepare(&qdev->ddev, &qfbdev->helper,
&qxl_fb_helper_funcs);
- ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
- qxl_num_crtc /* num_crtc - QXL supports just 1 */,
+ ret = drm_fb_helper_init(&qdev->ddev, &qfbdev->helper,
QXLFB_CONN_LIMIT);
if (ret)
goto free;
@@ -425,7 +425,7 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
if (!qdev->mode_info.qfbdev)
return;
- qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
+ qxl_fbdev_destroy(&qdev->ddev, qdev->mode_info.qfbdev);
kfree(qdev->mode_info.qfbdev);
qdev->mode_info.qfbdev = NULL;
}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 5a4c8c492683..0b82a87916ae 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -64,7 +64,7 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_map *qxl_map = data;
- return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
+ return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
&qxl_map->offset);
}
@@ -375,7 +375,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
byte = param->index / 8;
idx = param->index % 8;
- if (qdev->pdev->revision < 4)
+ if (dev->pdev->revision < 4)
return -ENOSYS;
if (byte >= 58)
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 0bf1e20c6e44..23a40106ab53 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -90,7 +90,7 @@ int qxl_irq_init(struct qxl_device *qdev)
atomic_set(&qdev->irq_received_cursor, 0);
atomic_set(&qdev->irq_received_io_cmd, 0);
qdev->irq_received_error = 0;
- ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
+ ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (unlikely(ret != 0)) {
DRM_ERROR("Failed installing irq: %d\n", ret);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index af685f1d91f8..2dcd5c14cb56 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -115,16 +115,21 @@ static void qxl_gc_work(struct work_struct *work)
qxl_garbage_collect(qdev);
}
-static int qxl_device_init(struct qxl_device *qdev,
- struct drm_device *ddev,
+int qxl_device_init(struct qxl_device *qdev,
+ struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags)
{
int r, sb;
- qdev->dev = &pdev->dev;
- qdev->ddev = ddev;
- qdev->pdev = pdev;
+ r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
+ if (r)
+ return r;
+
+ qdev->ddev.pdev = pdev;
+ pci_set_drvdata(pdev, &qdev->ddev);
+ qdev->ddev.dev_private = qdev;
+
qdev->flags = flags;
mutex_init(&qdev->gem.mutex);
@@ -263,7 +268,7 @@ static int qxl_device_init(struct qxl_device *qdev,
return 0;
}
-static void qxl_device_fini(struct qxl_device *qdev)
+void qxl_device_fini(struct qxl_device *qdev)
{
if (qdev->current_release_bo[0])
qxl_bo_unref(&qdev->current_release_bo[0]);
@@ -284,56 +289,3 @@ static void qxl_device_fini(struct qxl_device *qdev)
qdev->mode_info.num_modes = 0;
qxl_debugfs_remove_files(qdev);
}
-
-int qxl_driver_unload(struct drm_device *dev)
-{
- struct qxl_device *qdev = dev->dev_private;
-
- if (qdev == NULL)
- return 0;
-
- drm_vblank_cleanup(dev);
-
- qxl_modeset_fini(qdev);
- qxl_device_fini(qdev);
-
- kfree(qdev);
- dev->dev_private = NULL;
- return 0;
-}
-
-int qxl_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct qxl_device *qdev;
- int r;
-
- qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
- if (qdev == NULL)
- return -ENOMEM;
-
- dev->dev_private = qdev;
-
- r = qxl_device_init(qdev, dev, dev->pdev, flags);
- if (r)
- goto out;
-
- r = drm_vblank_init(dev, 1);
- if (r)
- goto unload;
-
- r = qxl_modeset_init(qdev);
- if (r)
- goto unload;
-
- drm_kms_helper_poll_init(qdev->ddev);
-
- return 0;
-unload:
- qxl_driver_unload(dev);
-
-out:
- kfree(qdev);
- return r;
-}
-
-
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index fa5440dc9a19..dbc13510a1f8 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -93,7 +93,7 @@ int qxl_bo_create(struct qxl_device *qdev,
if (bo == NULL)
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
- r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
+ r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
if (unlikely(r)) {
kfree(bo);
return r;
@@ -113,7 +113,7 @@ int qxl_bo_create(struct qxl_device *qdev,
NULL, NULL, &qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
- dev_err(qdev->dev,
+ dev_err(qdev->ddev.dev,
"object_init failed for (%lu, 0x%08X)\n",
size, domain);
return r;
@@ -223,7 +223,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
- struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ struct drm_device *ddev = bo->gem_base.dev;
int r;
if (bo->pin_count) {
@@ -240,17 +240,17 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
*gpu_addr = qxl_bo_gpu_offset(bo);
}
if (unlikely(r != 0))
- dev_err(qdev->dev, "%p pin failed\n", bo);
+ dev_err(ddev->dev, "%p pin failed\n", bo);
return r;
}
int qxl_bo_unpin(struct qxl_bo *bo)
{
- struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ struct drm_device *ddev = bo->gem_base.dev;
int r, i;
if (!bo->pin_count) {
- dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
+ dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
@@ -260,7 +260,7 @@ int qxl_bo_unpin(struct qxl_bo *bo)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
- dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
+ dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
return r;
}
@@ -270,9 +270,9 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
if (list_empty(&qdev->gem.objects))
return;
- dev_err(qdev->dev, "Userspace still has active objects !\n");
+ dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
- dev_err(qdev->dev, "%p %p %lu %lu force free\n",
+ dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
mutex_lock(&qdev->gem.mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 4d8311373ba3..0374fd93f4d6 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -34,8 +34,8 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
- struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
- dev_err(qdev->dev, "%p reserve failed\n", bo);
+ struct drm_device *ddev = bo->gem_base.dev;
+ dev_err(ddev->dev, "%p reserve failed\n", bo);
}
return r;
}
@@ -70,8 +70,8 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
- struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
- dev_err(qdev->dev, "%p reserve failed for wait\n",
+ struct drm_device *ddev = bo->gem_base.dev;
+ dev_err(ddev->dev, "%p reserve failed for wait\n",
bo);
}
return r;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 11761330a6b8..4e1a40389964 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -35,7 +35,6 @@
#include "qxl_object.h"
#include <linux/delay.h>
-static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
{
@@ -367,6 +366,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
}
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
struct ttm_mem_reg *new_mem)
{
struct qxl_bo *qbo;
@@ -394,8 +394,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
.move_notify = &qxl_bo_move_notify,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int qxl_ttm_init(struct qxl_device *qdev)
@@ -410,7 +408,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_bo_device_init(&qdev->mman.bdev,
qdev->mman.bo_global_ref.ref.object,
&qxl_bo_driver,
- qdev->ddev->anon_inode->i_mapping,
+ qdev->ddev.anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -436,11 +434,6 @@ int qxl_ttm_init(struct qxl_device *qdev)
((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
DRM_INFO("qxl: %uM of Surface memory size\n",
(unsigned)qdev->surfaceram_size / (1024 * 1024));
- r = qxl_ttm_debugfs_init(qdev);
- if (r) {
- DRM_ERROR("Failed to init debugfs\n");
- return r;
- }
return 0;
}
@@ -463,17 +456,17 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct qxl_device *rdev = dev->dev_private;
- int ret;
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+ struct drm_printer p = drm_seq_file_printer(m);
spin_lock(&glob->lru_lock);
- ret = drm_mm_dump_table(m, mm);
+ drm_mm_print(mm, &p);
spin_unlock(&glob->lru_lock);
- return ret;
+ return 0;
}
#endif
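drm_mm_dump_table() took the seq_file directly and returned a status; its replacement drm_mm_print() writes through a struct drm_printer, with drm_seq_file_printer() adapting the debugfs seq_file to that interface. The same printer can carry additional output via drm_printf() (a sketch):

	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "allocator state:\n");
	drm_mm_print(mm, &p);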
-static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+int qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 05f4ebe31ce2..3c492a0aa6bd 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1195,7 +1195,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (target_fb->pixel_format) {
+ switch (target_fb->format->format) {
case DRM_FORMAT_C8:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
@@ -1261,7 +1261,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format, &format_name));
+ drm_get_format_name(target_fb->format->format, &format_name));
return -EINVAL;
}
@@ -1277,7 +1277,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
/* Calculate the macrotile mode index. */
tile_split_bytes = 64 << tile_split;
- tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+ tileb = 8 * 8 * target_fb->format->cpp[0];
tileb = min(tile_split_bytes, tileb);
for (index = 0; tileb > 64; index++)
@@ -1285,13 +1285,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
if (index >= 16) {
DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
- target_fb->bits_per_pixel, tile_split);
+ target_fb->format->cpp[0] * 8,
+ tile_split);
return -EINVAL;
}
num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
} else {
- switch (target_fb->bits_per_pixel) {
+ switch (target_fb->format->cpp[0] * 8) {
case 8:
index = 10;
break;
@@ -1414,7 +1415,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -1510,7 +1511,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (target_fb->pixel_format) {
+ switch (target_fb->format->format) {
case DRM_FORMAT_C8:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
@@ -1563,7 +1564,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format, &format_name));
+ drm_get_format_name(target_fb->format->format, &format_name));
return -EINVAL;
}
@@ -1621,7 +1622,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
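These atombios hunks are part of a tree-wide conversion: drivers stop reading fb->pixel_format and fb->bits_per_pixel and instead go through the const struct drm_format_info pointer the core now attaches to every framebuffer. A hedged sketch of the three equivalences used above:

        #include <drm/drm_framebuffer.h>
        #include <drm/drm_fourcc.h>

        static void example_fb_format_info(const struct drm_framebuffer *fb)
        {
                u32 fourcc = fb->format->format;        /* was fb->pixel_format */
                unsigned int cpp = fb->format->cpp[0];  /* was bits_per_pixel / 8 */
                unsigned int pitch_pixels = fb->pitches[0] / cpp;

                (void)fourcc;
                (void)pitch_pixels;
        }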
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f5e84f4b58e6..e3399310d41d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3225,13 +3225,19 @@ void r100_bandwidth_update(struct radeon_device *rdev)
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled) {
+ const struct drm_framebuffer *fb =
+ rdev->mode_info.crtcs[0]->base.primary->fb;
+
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
- pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
+ pixel_bytes1 = fb->format->cpp[0];
}
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
if (rdev->mode_info.crtcs[1]->base.enabled) {
+ const struct drm_framebuffer *fb =
+ rdev->mode_info.crtcs[1]->base.primary->fb;
+
mode2 = &rdev->mode_info.crtcs[1]->base.mode;
- pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
+ pixel_bytes2 = fb->format->cpp[0];
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c829cfb02fc4..04c0ed41374f 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -596,52 +596,56 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
#ifdef CONFIG_ACPI
static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
{
- bool ret = false;
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
- GOP_VBIOS_CONTENT *vbios;
- VFCT_IMAGE_HEADER *vhdr;
+ unsigned offset;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
- goto out_unmap;
+ return false;
}
vfct = (UEFI_ACPI_VFCT *)hdr;
- if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
- DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
- goto out_unmap;
- }
+ offset = vfct->VBIOSImageOffset;
- vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
- vhdr = &vbios->VbiosHeader;
- DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
- vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
- vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
-
- if (vhdr->PCIBus != rdev->pdev->bus->number ||
- vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
- vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
- vhdr->VendorID != rdev->pdev->vendor ||
- vhdr->DeviceID != rdev->pdev->device) {
- DRM_INFO("ACPI VFCT table is not for this card\n");
- goto out_unmap;
- }
+ while (offset < tbl_size) {
+ GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset);
+ VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader;
- if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
- DRM_ERROR("ACPI VFCT image truncated\n");
- goto out_unmap;
- }
+ offset += sizeof(VFCT_IMAGE_HEADER);
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image header truncated\n");
+ return false;
+ }
- rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
- ret = !!rdev->bios;
+ offset += vhdr->ImageLength;
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image truncated\n");
+ return false;
+ }
+
+ if (vhdr->ImageLength &&
+ vhdr->PCIBus == rdev->pdev->bus->number &&
+ vhdr->PCIDevice == PCI_SLOT(rdev->pdev->devfn) &&
+ vhdr->PCIFunction == PCI_FUNC(rdev->pdev->devfn) &&
+ vhdr->VendorID == rdev->pdev->vendor &&
+ vhdr->DeviceID == rdev->pdev->device) {
+ rdev->bios = kmemdup(&vbios->VbiosContent,
+ vhdr->ImageLength,
+ GFP_KERNEL);
+
+ if (!rdev->bios)
+ return false;
+ return true;
+ }
+ }
-out_unmap:
- return ret;
+ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+ return false;
}
#else
static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
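The rewritten radeon_acpi_vfct_bios() above no longer trusts a single image header: it walks every image in the table, advancing an offset that is validated against the table size before each dereference. A condensed sketch of that loop shape, with a hypothetical record type standing in for VFCT_IMAGE_HEADER:

        struct rec_hdr {
                unsigned int len;       /* length of the body after the header */
        };

        static bool walk_records(const unsigned char *tbl, size_t tbl_size,
                                 size_t offset)
        {
                while (offset < tbl_size) {
                        const struct rec_hdr *hdr;

                        if (tbl_size - offset < sizeof(*hdr))
                                return false;           /* header truncated */
                        hdr = (const struct rec_hdr *)(tbl + offset);
                        offset += sizeof(*hdr);

                        if (hdr->len > tbl_size - offset)
                                return false;           /* body truncated */

                        /* ...match and consume the record body here... */
                        offset += hdr->len;
                }
                return false;                           /* nothing matched */
        }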
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 510ea371dacc..a8442f7196d6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -121,7 +121,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
VRAM, also put everything into VRAM on AGP cards and older
IGP chips to avoid image corruptions */
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
- (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
+ (i == 0 || pci_find_capability(p->rdev->ddev->pdev,
+ PCI_CAP_ID_AGP) ||
p->rdev->family == CHIP_RS780 ||
p->rdev->family == CHIP_RS880)) {
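drm_pci_device_is_agp() is being removed, so the UVD placement check above asks the PCI core directly. The replacement is a plain capability lookup; a one-line sketch:

        #include <linux/pci.h>

        /* Nonzero when @pdev exposes an AGP capability; the open-coded
         * equivalent of the removed drm_pci_device_is_agp(). */
        static bool example_is_agp(struct pci_dev *pdev)
        {
                return pci_find_capability(pdev, PCI_CAP_ID_AGP) != 0;
        }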
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8a1df2a1afbd..4b0c388be3f5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1549,8 +1549,6 @@ failed:
return r;
}
-static void radeon_debugfs_remove_files(struct radeon_device *rdev);
-
/**
* radeon_device_fini - tear down the driver
*
@@ -1577,7 +1575,6 @@ void radeon_device_fini(struct radeon_device *rdev)
rdev->rmmio = NULL;
if (rdev->family >= CHIP_BONAIRE)
radeon_doorbell_fini(rdev);
- radeon_debugfs_remove_files(rdev);
}
@@ -1954,16 +1951,3 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
#endif
return 0;
}
-
-static void radeon_debugfs_remove_files(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < rdev->debugfs_count; i++) {
- drm_debugfs_remove_files(rdev->debugfs[i].files,
- rdev->debugfs[i].num_files,
- rdev->ddev->primary);
- }
-#endif
-}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index e7409e8a9f87..aea8b62835a4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -549,19 +549,19 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
base -= radeon_crtc->legacy_display_base_addr;
- pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
+ pitch_pixels = fb->pitches[0] / fb->format->cpp[0];
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev)) {
base &= ~0x7ff;
} else {
- int byteshift = fb->bits_per_pixel >> 4;
+ int byteshift = fb->format->cpp[0] * 8 >> 4;
int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
}
} else {
int offset = crtc->y * pitch_pixels + crtc->x;
- switch (fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
default:
offset *= 1;
@@ -1327,7 +1327,7 @@ radeon_framebuffer_init(struct drm_device *dev,
{
int ret;
rfb->obj = obj;
- drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
if (ret) {
rfb->obj = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 6d1237d6e1b8..7d5ada3980dc 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -667,7 +667,7 @@ radeon_dp_mst_init(struct radeon_connector *radeon_connector)
return 0;
radeon_connector->mst_mgr.cbs = &mst_cbs;
- return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev->dev,
+ return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev,
&radeon_connector->ddc_bus->aux, 16, 6,
radeon_connector->base.base.id);
}
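drm_dp_mst_topology_mgr_init() now takes the drm_device itself rather than its underlying struct device, hence the one-argument change above. A sketch of the updated call, with the limits left as caller policy:

        #include <drm/drm_dp_mst_helper.h>

        static int example_mst_init(struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_device *dev,
                                    struct drm_dp_aux *aux, int conn_id)
        {
                /* 16-byte DPCD transactions, up to 6 payload slots */
                return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 6,
                                                    conn_id);
        }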
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 30bd4a6a9d46..956c425e639e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -103,7 +103,7 @@
#define KMS_DRIVER_MINOR 49
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
-int radeon_driver_unload_kms(struct drm_device *dev);
+void radeon_driver_unload_kms(struct drm_device *dev);
void radeon_driver_lastclose_kms(struct drm_device *dev);
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void radeon_driver_postclose_kms(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 899b6a1644bd..2be4fe9c7217 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -263,7 +263,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
@@ -290,7 +290,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
- DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
@@ -366,7 +366,6 @@ int radeon_fbdev_init(struct radeon_device *rdev)
&radeon_fb_helper_funcs);
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
- rdev->num_crtc,
RADEONFB_CONN_LIMIT);
if (ret)
goto free;
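drm_fb_helper_init() lost its CRTC-count argument (the helper now derives it from the device), which is why the call above drops rdev->num_crtc. A minimal sketch of the v4.11 signature:

        #include <drm/drm_fb_helper.h>

        static int example_fbdev_init(struct drm_device *dev,
                                      struct drm_fb_helper *helper,
                                      int max_conn)
        {
                /* only the connector limit remains */
                return drm_fb_helper_init(dev, helper, max_conn);
        }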
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index c084cadcbf21..1b7528df7f7f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -85,10 +85,8 @@ static void radeon_hotplug_work_func(struct work_struct *work)
return;
mutex_lock(&mode_config->mutex);
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head)
- radeon_connector_hotplug(connector);
- }
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -103,10 +101,8 @@ static void radeon_dp_work_func(struct work_struct *work)
struct drm_connector *connector;
/* this should take a mutex */
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head)
- radeon_connector_hotplug(connector);
- }
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
}
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 4388ddeec8d2..56f35c06742c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -53,12 +53,12 @@ static inline bool radeon_has_atpx(void) { return false; }
* the rest of the device (CP, writeback, etc.).
* Returns 0 on success.
*/
-int radeon_driver_unload_kms(struct drm_device *dev)
+void radeon_driver_unload_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
if (rdev == NULL)
- return 0;
+ return;
if (rdev->rmmio == NULL)
goto done_free;
@@ -78,7 +78,6 @@ int radeon_driver_unload_kms(struct drm_device *dev)
done_free:
kfree(rdev);
dev->dev_private = NULL;
- return 0;
}
/**
@@ -106,7 +105,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)rdev;
/* update BUS flag */
- if (drm_pci_device_is_agp(dev)) {
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
flags |= RADEON_IS_AGP;
} else if (pci_is_pcie(dev->pdev)) {
flags |= RADEON_IS_PCIE;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index d0de4022fff9..ce6cb6666212 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -402,7 +402,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
target_fb = crtc->primary->fb;
}
- switch (target_fb->bits_per_pixel) {
+ switch (target_fb->format->cpp[0] * 8) {
case 8:
format = 2;
break;
@@ -476,10 +476,9 @@ retry:
crtc_offset_cntl = 0;
- pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
- crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
- ((target_fb->bits_per_pixel * 8) - 1)) /
- (target_fb->bits_per_pixel * 8));
+ pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
+ crtc_pitch = DIV_ROUND_UP(pitch_pixels * target_fb->format->cpp[0] * 8,
+ target_fb->format->cpp[0] * 8 * 8);
crtc_pitch |= crtc_pitch << 16;
crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
@@ -504,14 +503,14 @@ retry:
crtc_tile_x0_y0 = x | (y << 16);
base &= ~0x7ff;
} else {
- int byteshift = target_fb->bits_per_pixel >> 4;
+ int byteshift = target_fb->format->cpp[0] * 8 >> 4;
int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
crtc_offset_cntl |= (y % 16);
}
} else {
int offset = y * pitch_pixels + x;
- switch (target_fb->bits_per_pixel) {
+ switch (target_fb->format->cpp[0] * 8) {
case 8:
offset *= 1;
break;
@@ -579,6 +578,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ const struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_encoder *encoder;
int format;
int hsync_start;
@@ -602,7 +602,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
}
}
- switch (crtc->primary->fb->bits_per_pixel) {
+ switch (fb->format->cpp[0] * 8) {
case 8:
format = 2;
break;
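The DIV_ROUND_UP() form above computes the same value as the old three-line rounding expression: the legacy CRTC pitch register wants the pitch in units of eight pixels, rounded up. A worked sketch:

        #include <linux/kernel.h>

        /* For a 1366-pixel-wide, 32bpp scanout (cpp = 4):
         *   DIV_ROUND_UP(1366 * 4 * 8, 4 * 8 * 8)
         * = DIV_ROUND_UP(1366, 8) = 171 groups of eight pixels. */
        static u32 example_crtc_pitch(u32 pitch_pixels, u32 cpp)
        {
                return DIV_ROUND_UP(pitch_pixels * cpp * 8, cpp * 8 * 8);
        }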
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f1da484864a9..ad282648fc8b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fixed.h>
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 41b72ce6613f..74b276060c20 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -765,6 +765,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
struct ttm_mem_reg *new_mem)
{
struct radeon_bo *rbo;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a10bb3deee54..9ffd8215d38a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -150,6 +150,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0cf03ccbf0a7..7a10b3852970 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -502,6 +502,8 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
mem->bus.addr =
ioremap_nocache(mem->bus.base + mem->bus.offset,
mem->bus.size);
+ if (!mem->bus.addr)
+ return -ENOMEM;
/*
* Alpha: Use just the bus offset plus
@@ -871,8 +873,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int radeon_ttm_init(struct radeon_device *rdev)
@@ -1033,13 +1033,13 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
- int ret;
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+ struct drm_printer p = drm_seq_file_printer(m);
spin_lock(&glob->lru_lock);
- ret = drm_mm_dump_table(m, mm);
+ drm_mm_print(mm, &p);
spin_unlock(&glob->lru_lock);
- return ret;
+ return 0;
}
static int ttm_pl_vram = TTM_PL_VRAM;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2944916f7102..d12b8978142f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2912,29 +2912,6 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
return ret;
}
-struct si_dpm_quirk {
- u32 chip_vendor;
- u32 chip_device;
- u32 subsys_vendor;
- u32 subsys_device;
- u32 max_sclk;
- u32 max_mclk;
-};
-
-/* cards with dpm stability problems */
-static struct si_dpm_quirk si_dpm_quirk_list[] = {
- /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
- { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
- { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
- { 0, 0, 0, 0 },
-};
-
static u16 si_get_lower_of_leakage_and_vce_voltage(struct radeon_device *rdev,
u16 vce_voltage)
{
@@ -2997,18 +2974,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
int i;
- struct si_dpm_quirk *p = si_dpm_quirk_list;
- /* limit all SI kickers */
- if (rdev->family == CHIP_PITCAIRN) {
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->device == 0x6810) ||
- (rdev->pdev->device == 0x6811) ||
- (rdev->pdev->device == 0x6816) ||
- (rdev->pdev->device == 0x6817) ||
- (rdev->pdev->device == 0x6806))
- max_mclk = 120000;
- } else if (rdev->family == CHIP_HAINAN) {
+ if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0xC3) ||
@@ -3018,18 +2985,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
max_sclk = 75000;
}
}
- /* Apply dpm quirks */
- while (p && p->chip_device != 0) {
- if (rdev->pdev->vendor == p->chip_vendor &&
- rdev->pdev->device == p->chip_device &&
- rdev->pdev->subsystem_vendor == p->subsys_vendor &&
- rdev->pdev->subsystem_device == p->subsys_device) {
- max_sclk = p->max_sclk;
- max_mclk = p->max_mclk;
- break;
- }
- ++p;
- }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index a01efe39a820..f541a4b5ac51 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -196,7 +196,7 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
memset(&data[5], 0, 44);
memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
- data += le32_to_cpu(data[4]) / 4;
+ data += (le32_to_cpu(sign->len) + 64) / 4;
data[0] = sign->val[i].sigval[0];
data[1] = sign->val[i].sigval[1];
data[2] = sign->val[i].sigval[2];
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 7fc10a9c34c3..a050a3699857 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -15,6 +15,7 @@
#define __RCAR_DU_ENCODER_H__
#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
struct rcar_du_device;
struct rcar_du_hdmienc;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index f9515f53cc5b..c4c5d1abcff8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -124,10 +124,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
hdmienc->renc = renc;
/* Link the bridge to the encoder. */
- bridge->encoder = encoder;
- encoder->bridge = bridge;
-
- ret = drm_bridge_attach(rcdu->ddev, bridge);
+ ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
drm_encoder_cleanup(encoder);
return ret;
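drm_bridge_attach() was reworked to take the encoder (and an optional previous bridge in a chain) and to do the encoder/bridge wiring itself, so the manual pointer setup above goes away. A sketch of the new call:

        #include <drm/drm_bridge.h>
        #include <drm/drm_encoder.h>

        static int example_attach_bridge(struct drm_encoder *encoder,
                                         struct drm_bridge *bridge)
        {
                /* NULL: this is the first bridge after the encoder */
                return drm_bridge_attach(encoder, bridge, NULL);
        }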
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index b5d3f16cfa12..ff61f6032f2c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -662,7 +662,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
drm_kms_helper_poll_init(dev);
if (dev->mode_config.num_connector) {
- fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
+ fbdev = drm_fbdev_cma_init(dev, 32,
dev->mode_config.num_connector);
if (IS_ERR(fbdev))
return PTR_ERR(fbdev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a74f8ed8ca2e..dcde6288da6c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -567,10 +567,10 @@ static int rcar_du_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- rstate->format = rcar_du_format_info(state->fb->pixel_format);
+ rstate->format = rcar_du_format_info(state->fb->format->format);
if (rstate->format == NULL) {
dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
- state->fb->pixel_format);
+ state->fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 83ebd162f3ef..b5bfbe50bd87 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -201,10 +201,10 @@ static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- rstate->format = rcar_du_format_info(state->fb->pixel_format);
+ rstate->format = rcar_du_format_info(state->fb->format->format);
if (rstate->format == NULL) {
dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
- state->fb->pixel_format);
+ state->fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 6f7f9c59f05b..ad31b3eb408f 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -21,6 +21,16 @@ config ROCKCHIP_ANALOGIX_DP
for the Analogix Core DP driver. If you want to enable DP
on RK3288 based SoC, you should select this option.
+config ROCKCHIP_CDN_DP
+ tristate "Rockchip cdn DP"
+ depends on DRM_ROCKCHIP
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the cdn DP driver. If you want to enable DP on
+ RK3399 based SoC, you should select this option.
+
config ROCKCHIP_DW_HDMI
tristate "Rockchip specific extensions for Synopsys DW HDMI"
depends on DRM_ROCKCHIP
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 9746365694ba..c931e2a7d8de 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -7,6 +7,8 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
+obj-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp.o
+cdn-dp-objs := cdn-dp-core.o cdn-dp-reg.o
obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
new file mode 100644
index 000000000000..9ab67a670885
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -0,0 +1,1260 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/extcon.h>
+#include <linux/firmware.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+
+#include <sound/hdmi-codec.h>
+
+#include "cdn-dp-core.h"
+#include "cdn-dp-reg.h"
+#include "rockchip_drm_vop.h"
+
+#define connector_to_dp(c) \
+ container_of(c, struct cdn_dp_device, connector)
+
+#define encoder_to_dp(c) \
+ container_of(c, struct cdn_dp_device, encoder)
+
+#define GRF_SOC_CON9 0x6224
+#define DP_SEL_VOP_LIT BIT(12)
+#define GRF_SOC_CON26 0x6268
+#define UPHY_SEL_BIT 3
+#define UPHY_SEL_MASK BIT(19)
+#define DPTX_HPD_SEL (3 << 12)
+#define DPTX_HPD_DEL (2 << 12)
+#define DPTX_HPD_SEL_MASK (3 << 28)
+
+#define CDN_FW_TIMEOUT_MS (64 * 1000)
+#define CDN_DPCD_TIMEOUT_MS 5000
+#define CDN_DP_FIRMWARE "rockchip/dptx.bin"
+
+struct cdn_dp_data {
+ u8 max_phy;
+};
+
+static struct cdn_dp_data rk3399_cdn_dp = {
+ .max_phy = 2,
+};
+
+static const struct of_device_id cdn_dp_dt_ids[] = {
+ { .compatible = "rockchip,rk3399-cdn-dp",
+ .data = (void *)&rk3399_cdn_dp },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);
+
+static int cdn_dp_grf_write(struct cdn_dp_device *dp,
+ unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ ret = clk_prepare_enable(dp->grf_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
+ return ret;
+ }
+
+ ret = regmap_write(dp->grf, reg, val);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
+ clk_disable_unprepare(dp->grf_clk);
+ return ret;
+ }
+
+ clk_disable_unprepare(dp->grf_clk);
+
+ return 0;
+}
+
+static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
+{
+ int ret;
+ u32 rate;
+
+ ret = clk_prepare_enable(dp->pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
+ goto err_pclk;
+ }
+
+ ret = clk_prepare_enable(dp->core_clk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
+ goto err_core_clk;
+ }
+
+ ret = pm_runtime_get_sync(dp->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
+ goto err_pclk;
+ }
+
+ reset_control_assert(dp->core_rst);
+ reset_control_assert(dp->dptx_rst);
+ reset_control_assert(dp->apb_rst);
+ reset_control_deassert(dp->core_rst);
+ reset_control_deassert(dp->dptx_rst);
+ reset_control_deassert(dp->apb_rst);
+
+ rate = clk_get_rate(dp->core_clk);
+ if (!rate) {
+ DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
+ ret = -EINVAL;
+ goto err_set_rate;
+ }
+
+ cdn_dp_set_fw_clk(dp, rate);
+ cdn_dp_clock_reset(dp);
+
+ return 0;
+
+err_set_rate:
+ clk_disable_unprepare(dp->core_clk);
+err_core_clk:
+ clk_disable_unprepare(dp->pclk);
+err_pclk:
+ return ret;
+}
+
+static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
+{
+ pm_runtime_put_sync(dp->dev);
+ clk_disable_unprepare(dp->pclk);
+ clk_disable_unprepare(dp->core_clk);
+}
+
+static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
+{
+ struct extcon_dev *edev = port->extcon;
+ union extcon_property_value property;
+ int dptx;
+ u8 lanes;
+
+ dptx = extcon_get_state(edev, EXTCON_DISP_DP);
+ if (dptx > 0) {
+ extcon_get_property(edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_SS, &property);
+ if (property.intval)
+ lanes = 2;
+ else
+ lanes = 4;
+ } else {
+ lanes = 0;
+ }
+
+ return lanes;
+}
+
+static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
+{
+ int ret;
+ u8 value;
+
+ *sink_count = 0;
+ ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
+ if (ret)
+ return ret;
+
+ *sink_count = DP_GET_SINK_COUNT(value);
+ return 0;
+}
+
+static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
+{
+ struct cdn_dp_port *port;
+ int i, lanes;
+
+ for (i = 0; i < dp->ports; i++) {
+ port = dp->port[i];
+ lanes = cdn_dp_get_port_lanes(port);
+ if (lanes)
+ return port;
+ }
+ return NULL;
+}
+
+static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
+ struct cdn_dp_port *port;
+ u8 sink_count = 0;
+
+ if (dp->active_port < 0 || dp->active_port >= dp->ports) {
+ DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
+ return false;
+ }
+
+ port = dp->port[dp->active_port];
+
+ /*
+ * Attempt to read sink count, retry in case the sink may not be ready.
+ *
+ * Sinks are *supposed* to come up within 1ms from an off state, but
+ * some docks need more time to power up.
+ */
+ while (time_before(jiffies, timeout)) {
+ if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
+ return false;
+
+ if (!cdn_dp_get_sink_count(dp, &sink_count))
+ return sink_count ? true : false;
+
+ usleep_range(5000, 10000);
+ }
+
+ DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
+ return false;
+}
+
+static enum drm_connector_status
+cdn_dp_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct cdn_dp_device *dp = connector_to_dp(connector);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ mutex_lock(&dp->lock);
+ if (dp->connected)
+ status = connector_status_connected;
+ mutex_unlock(&dp->lock);
+
+ return status;
+}
+
+static void cdn_dp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = cdn_dp_connector_detect,
+ .destroy = cdn_dp_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int cdn_dp_connector_get_modes(struct drm_connector *connector)
+{
+ struct cdn_dp_device *dp = connector_to_dp(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+ edid = dp->edid;
+ if (edid) {
+ DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
+ edid->width_cm, edid->height_cm);
+
+ dp->sink_has_audio = drm_detect_monitor_audio(edid);
+ ret = drm_add_edid_modes(connector, edid);
+ if (ret) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ drm_edid_to_eld(connector, edid);
+ }
+ }
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+static struct drm_encoder *
+cdn_dp_connector_best_encoder(struct drm_connector *connector)
+{
+ struct cdn_dp_device *dp = connector_to_dp(connector);
+
+ return &dp->encoder;
+}
+
+static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct cdn_dp_device *dp = connector_to_dp(connector);
+ struct drm_display_info *display_info = &dp->connector.display_info;
+ u32 requested, actual, rate, sink_max, source_max = 0;
+ u8 lanes, bpc;
+
+ /* If DP is disconnected, every mode is invalid */
+ if (!dp->connected)
+ return MODE_BAD;
+
+ switch (display_info->bpc) {
+ case 10:
+ bpc = 10;
+ break;
+ case 6:
+ bpc = 6;
+ break;
+ default:
+ bpc = 8;
+ break;
+ }
+
+ requested = mode->clock * bpc * 3 / 1000;
+
+ source_max = dp->lanes;
+ sink_max = drm_dp_max_lane_count(dp->dpcd);
+ lanes = min(source_max, sink_max);
+
+ source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
+ sink_max = drm_dp_max_link_rate(dp->dpcd);
+ rate = min(source_max, sink_max);
+
+ actual = rate * lanes / 100;
+
+ /* efficiency is about 0.8 */
+ actual = actual * 8 / 10;
+
+ if (requested > actual) {
+ DRM_DEV_DEBUG_KMS(dp->dev,
+ "requested=%d, actual=%d, clock=%d\n",
+ requested, actual, mode->clock);
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
+ .get_modes = cdn_dp_connector_get_modes,
+ .best_encoder = cdn_dp_connector_best_encoder,
+ .mode_valid = cdn_dp_connector_mode_valid,
+};
+
+static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
+{
+ int ret;
+ const u32 *iram_data, *dram_data;
+ const struct firmware *fw = dp->fw;
+ const struct cdn_firmware_header *hdr;
+
+ hdr = (struct cdn_firmware_header *)fw->data;
+ if (fw->size != le32_to_cpu(hdr->size_bytes)) {
+ DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
+ return -EINVAL;
+ }
+
+ iram_data = (const u32 *)(fw->data + hdr->header_size);
+ dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);
+
+ ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
+ dram_data, hdr->dram_size);
+ if (ret)
+ return ret;
+
+ ret = cdn_dp_set_firmware_active(dp, true);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
+ return ret;
+ }
+
+ return cdn_dp_event_config(dp);
+}
+
+static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
+{
+ int ret;
+
+ if (!cdn_dp_check_sink_connection(dp))
+ return -ENODEV;
+
+ ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
+ DP_RECEIVER_CAP_SIZE);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
+ return ret;
+ }
+
+ kfree(dp->edid);
+ dp->edid = drm_do_get_edid(&dp->connector,
+ cdn_dp_get_edid_block, dp);
+ return 0;
+}
+
+static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
+{
+ union extcon_property_value property;
+ int ret;
+
+ ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
+ (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK);
+ if (ret)
+ return ret;
+
+ if (!port->phy_enabled) {
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
+ ret);
+ goto err_phy;
+ }
+ port->phy_enabled = true;
+ }
+
+ ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
+ DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
+ goto err_power_on;
+ }
+
+ ret = cdn_dp_get_hpd_status(dp);
+ if (ret <= 0) {
+ if (!ret)
+ DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
+ goto err_power_on;
+ }
+
+ ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_TYPEC_POLARITY, &property);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "get property failed\n");
+ goto err_power_on;
+ }
+
+ port->lanes = cdn_dp_get_port_lanes(port);
+ ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
+ ret);
+ goto err_power_on;
+ }
+
+ dp->active_port = port->id;
+ return 0;
+
+err_power_on:
+ if (phy_power_off(port->phy))
+ DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
+ else
+ port->phy_enabled = false;
+
+err_phy:
+ cdn_dp_grf_write(dp, GRF_SOC_CON26,
+ DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
+ return ret;
+}
+
+static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
+ struct cdn_dp_port *port)
+{
+ int ret;
+
+ if (port->phy_enabled) {
+ ret = phy_power_off(port->phy);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
+ return ret;
+ }
+ }
+
+ port->phy_enabled = false;
+ port->lanes = 0;
+ dp->active_port = -1;
+ return 0;
+}
+
+static int cdn_dp_disable(struct cdn_dp_device *dp)
+{
+ int ret, i;
+
+ if (!dp->active)
+ return 0;
+
+ for (i = 0; i < dp->ports; i++)
+ cdn_dp_disable_phy(dp, dp->port[i]);
+
+ ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
+ DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
+ ret);
+ return ret;
+ }
+
+ cdn_dp_set_firmware_active(dp, false);
+ cdn_dp_clk_disable(dp);
+ dp->active = false;
+ dp->link.rate = 0;
+ dp->link.num_lanes = 0;
+ if (!dp->connected) {
+ kfree(dp->edid);
+ dp->edid = NULL;
+ }
+
+ return 0;
+}
+
+static int cdn_dp_enable(struct cdn_dp_device *dp)
+{
+ int ret, i, lanes;
+ struct cdn_dp_port *port;
+
+ port = cdn_dp_connected_port(dp);
+ if (!port) {
+ DRM_DEV_ERROR(dp->dev,
+ "Can't enable without connection\n");
+ return -ENODEV;
+ }
+
+ if (dp->active)
+ return 0;
+
+ ret = cdn_dp_clk_enable(dp);
+ if (ret)
+ return ret;
+
+ ret = cdn_dp_firmware_init(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
+ goto err_clk_disable;
+ }
+
+ /* only enable the port that is connected to a downstream device */
+ for (i = port->id; i < dp->ports; i++) {
+ port = dp->port[i];
+ lanes = cdn_dp_get_port_lanes(port);
+ if (lanes) {
+ ret = cdn_dp_enable_phy(dp, port);
+ if (ret)
+ continue;
+
+ ret = cdn_dp_get_sink_capability(dp);
+ if (ret) {
+ cdn_dp_disable_phy(dp, port);
+ } else {
+ dp->active = true;
+ dp->lanes = port->lanes;
+ return 0;
+ }
+ }
+ }
+
+err_clk_disable:
+ cdn_dp_clk_disable(dp);
+ return ret;
+}
+
+static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ struct drm_display_info *display_info = &dp->connector.display_info;
+ struct video_info *video = &dp->video_info;
+
+ switch (display_info->bpc) {
+ case 10:
+ video->color_depth = 10;
+ break;
+ case 6:
+ video->color_depth = 6;
+ break;
+ default:
+ video->color_depth = 8;
+ break;
+ }
+
+ video->color_fmt = PXL_RGB;
+ video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+ video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+ memcpy(&dp->mode, adjusted, sizeof(*mode));
+}
+
+static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ struct cdn_dp_port *port = cdn_dp_connected_port(dp);
+ u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
+
+ if (!port || !dp->link.rate || !dp->link.num_lanes)
+ return false;
+
+ if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
+ DP_LINK_STATUS_SIZE)) {
+ DRM_ERROR("Failed to get link status\n");
+ return false;
+ }
+
+ /* if link training is requested we should perform it always */
+ return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
+}
+
+static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
+{
+ struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ int ret, val;
+ struct rockchip_crtc_state *state;
+
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
+ return;
+ }
+
+ DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
+ (ret) ? "LIT" : "BIG");
+ state = to_rockchip_crtc_state(encoder->crtc->state);
+ if (ret) {
+ val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
+ state->output_mode = ROCKCHIP_OUT_MODE_P888;
+ } else {
+ val = DP_SEL_VOP_LIT << 16;
+ state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ }
+
+ ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
+ if (ret)
+ return;
+
+ mutex_lock(&dp->lock);
+
+ ret = cdn_dp_enable(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
+ ret);
+ goto out;
+ }
+ if (!cdn_dp_check_link_status(dp)) {
+ ret = cdn_dp_train_link(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
+ goto out;
+ }
+ }
+
+ ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
+ goto out;
+ }
+
+ ret = cdn_dp_config_video(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
+ goto out;
+ }
+
+ ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&dp->lock);
+}
+
+static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
+{
+ struct cdn_dp_device *dp = encoder_to_dp(encoder);
+ int ret;
+
+ mutex_lock(&dp->lock);
+ if (dp->active) {
+ ret = cdn_dp_disable(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
+ ret);
+ }
+ }
+ mutex_unlock(&dp->lock);
+
+ /*
+ * In the following two cases we need to run the event_work to
+ * re-enable the DP:
+ * 1. If more than one port device is connected and one of them is
+ * removed, the DP is disabled here; run the event_work to re-open
+ * the DP for the remaining port.
+ * 2. If re-training or re-config failed, the DP is disabled here;
+ * run the event_work to re-connect it.
+ */
+ if (!dp->connected && cdn_dp_connected_port(dp))
+ schedule_work(&dp->event_work);
+}
+
+static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
+ .mode_set = cdn_dp_encoder_mode_set,
+ .enable = cdn_dp_encoder_enable,
+ .disable = cdn_dp_encoder_disable,
+ .atomic_check = cdn_dp_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
+{
+ struct device *dev = dp->dev;
+ struct device_node *np = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(dp->grf)) {
+ DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
+ return PTR_ERR(dp->grf);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dp->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dp->regs)) {
+ DRM_DEV_ERROR(dev, "ioremap reg failed\n");
+ return PTR_ERR(dp->regs);
+ }
+
+ dp->core_clk = devm_clk_get(dev, "core-clk");
+ if (IS_ERR(dp->core_clk)) {
+ DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
+ return PTR_ERR(dp->core_clk);
+ }
+
+ dp->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(dp->pclk)) {
+ DRM_DEV_ERROR(dev, "cannot get pclk\n");
+ return PTR_ERR(dp->pclk);
+ }
+
+ dp->spdif_clk = devm_clk_get(dev, "spdif");
+ if (IS_ERR(dp->spdif_clk)) {
+ DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
+ return PTR_ERR(dp->spdif_clk);
+ }
+
+ dp->grf_clk = devm_clk_get(dev, "grf");
+ if (IS_ERR(dp->grf_clk)) {
+ DRM_DEV_ERROR(dev, "cannot get grf clk\n");
+ return PTR_ERR(dp->grf_clk);
+ }
+
+ dp->spdif_rst = devm_reset_control_get(dev, "spdif");
+ if (IS_ERR(dp->spdif_rst)) {
+ DRM_DEV_ERROR(dev, "no spdif reset control found\n");
+ return PTR_ERR(dp->spdif_rst);
+ }
+
+ dp->dptx_rst = devm_reset_control_get(dev, "dptx");
+ if (IS_ERR(dp->dptx_rst)) {
+ DRM_DEV_ERROR(dev, "no uphy reset control found\n");
+ return PTR_ERR(dp->dptx_rst);
+ }
+
+ dp->core_rst = devm_reset_control_get(dev, "core");
+ if (IS_ERR(dp->core_rst)) {
+ DRM_DEV_ERROR(dev, "no core reset control found\n");
+ return PTR_ERR(dp->core_rst);
+ }
+
+ dp->apb_rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(dp->apb_rst)) {
+ DRM_DEV_ERROR(dev, "no apb reset control found\n");
+ return PTR_ERR(dp->apb_rst);
+ }
+
+ return 0;
+}
+
+static int cdn_dp_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct audio_info audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .channels = params->channels,
+ };
+ int ret;
+
+ mutex_lock(&dp->lock);
+ if (!dp->active) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ audio.format = AFMT_I2S;
+ break;
+ case HDMI_SPDIF:
+ audio.format = AFMT_SPDIF;
+ break;
+ default:
+ DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = cdn_dp_audio_config(dp, &audio);
+ if (!ret)
+ dp->audio_info = audio;
+
+out:
+ mutex_unlock(&dp->lock);
+ return ret;
+}
+
+static void cdn_dp_audio_shutdown(struct device *dev, void *data)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&dp->lock);
+ if (!dp->active)
+ goto out;
+
+ ret = cdn_dp_audio_stop(dp, &dp->audio_info);
+ if (!ret)
+ dp->audio_info.format = AFMT_UNUSED;
+out:
+ mutex_unlock(&dp->lock);
+}
+
+static int cdn_dp_audio_digital_mute(struct device *dev, void *data,
+ bool enable)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&dp->lock);
+ if (!dp->active) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = cdn_dp_audio_mute(dp, enable);
+
+out:
+ mutex_unlock(&dp->lock);
+ return ret;
+}
+
+static int cdn_dp_audio_get_eld(struct device *dev, void *data,
+ u8 *buf, size_t len)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+
+ memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = cdn_dp_audio_hw_params,
+ .audio_shutdown = cdn_dp_audio_shutdown,
+ .digital_mute = cdn_dp_audio_digital_mute,
+ .get_eld = cdn_dp_audio_get_eld,
+};
+
+static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
+ struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .i2s = 1,
+ .spdif = 1,
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 8,
+ };
+
+ dp->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(dp->audio_pdev);
+}
+
+static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
+{
+ int ret;
+ unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
+ unsigned long sleep = 1000;
+
+ WARN_ON(!mutex_is_locked(&dp->lock));
+
+ if (dp->fw_loaded)
+ return 0;
+
+ /* Drop the lock before getting the firmware to avoid blocking boot */
+ mutex_unlock(&dp->lock);
+
+ while (time_before(jiffies, timeout)) {
+ ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
+ if (ret == -ENOENT) {
+ msleep(sleep);
+ sleep *= 2;
+ continue;
+ } else if (ret) {
+ DRM_DEV_ERROR(dp->dev,
+ "failed to request firmware: %d\n", ret);
+ goto out;
+ }
+
+ dp->fw_loaded = true;
+ ret = 0;
+ goto out;
+ }
+
+ DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
+ ret = -ETIMEDOUT;
+out:
+ mutex_lock(&dp->lock);
+ return ret;
+}
+
+static void cdn_dp_pd_event_work(struct work_struct *work)
+{
+ struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
+ event_work);
+ struct drm_connector *connector = &dp->connector;
+ enum drm_connector_status old_status;
+
+ int ret;
+
+ mutex_lock(&dp->lock);
+
+ if (dp->suspended)
+ goto out;
+
+ ret = cdn_dp_request_firmware(dp);
+ if (ret)
+ goto out;
+
+ dp->connected = true;
+
+ /* Not connected, notify userspace to disable the block */
+ if (!cdn_dp_connected_port(dp)) {
+ DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
+ dp->connected = false;
+
+ /* Connected but not enabled, enable the block */
+ } else if (!dp->active) {
+ DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
+ ret = cdn_dp_enable(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
+ dp->connected = false;
+ }
+
+ /* Enabled and connected to a dongle without a sink, notify userspace */
+ } else if (!cdn_dp_check_sink_connection(dp)) {
+ DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
+ dp->connected = false;
+
+ /* Enabled and connected with a sink, re-train if requested */
+ } else if (!cdn_dp_check_link_status(dp)) {
+ unsigned int rate = dp->link.rate;
+ unsigned int lanes = dp->link.num_lanes;
+ struct drm_display_mode *mode = &dp->mode;
+
+ DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
+ ret = cdn_dp_train_link(dp);
+ if (ret) {
+ dp->connected = false;
+ DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
+ goto out;
+ }
+
+ /* If training result is changed, update the video config */
+ if (mode->clock &&
+ (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
+ ret = cdn_dp_config_video(dp);
+ if (ret) {
+ dp->connected = false;
+ DRM_DEV_ERROR(dp->dev,
+ "Failed to config video %d\n",
+ ret);
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&dp->lock);
+
+ old_status = connector->status;
+ connector->status = connector->funcs->detect(connector, false);
+ if (old_status != connector->status)
+ drm_kms_helper_hotplug_event(dp->drm_dev);
+}
+
+static int cdn_dp_pd_event(struct notifier_block *nb,
+ unsigned long event, void *priv)
+{
+ struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
+ event_nb);
+ struct cdn_dp_device *dp = port->dp;
+
+ /*
+ * It would be nice to be able to just do the work inline right here.
+ * However, we need to make a bunch of calls that might sleep in order
+ * to turn on the block/phy, so use a worker instead.
+ */
+ schedule_work(&dp->event_work);
+
+ return NOTIFY_DONE;
+}
+
+static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct cdn_dp_port *port;
+ struct drm_device *drm_dev = data;
+ int ret, i;
+
+ ret = cdn_dp_parse_dt(dp);
+ if (ret < 0)
+ return ret;
+
+ dp->drm_dev = drm_dev;
+ dp->connected = false;
+ dp->active = false;
+ dp->active_port = -1;
+
+ INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);
+
+ encoder = &dp->encoder;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dev->of_node);
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize encoder with drm\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);
+
+ connector = &dp->connector;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->dpms = DRM_MODE_DPMS_OFF;
+
+ ret = drm_connector_init(drm_dev, connector,
+ &cdn_dp_atomic_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ DRM_ERROR("failed to initialize connector with drm\n");
+ goto err_free_encoder;
+ }
+
+ drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_ERROR("failed to attach connector and encoder\n");
+ goto err_free_connector;
+ }
+
+ cdn_dp_audio_codec_init(dp, dev);
+
+ for (i = 0; i < dp->ports; i++) {
+ port = dp->port[i];
+
+ port->event_nb.notifier_call = cdn_dp_pd_event;
+ ret = devm_extcon_register_notifier(dp->dev, port->extcon,
+ EXTCON_DISP_DP,
+ &port->event_nb);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "register EXTCON_DISP_DP notifier err\n");
+ goto err_free_connector;
+ }
+ }
+
+ pm_runtime_enable(dev);
+
+ schedule_work(&dp->event_work);
+
+ return 0;
+
+err_free_connector:
+ drm_connector_cleanup(connector);
+err_free_encoder:
+ drm_encoder_cleanup(encoder);
+ return ret;
+}
+
+static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &dp->encoder;
+ struct drm_connector *connector = &dp->connector;
+
+ cancel_work_sync(&dp->event_work);
+ platform_device_unregister(dp->audio_pdev);
+ cdn_dp_encoder_disable(encoder);
+ encoder->funcs->destroy(encoder);
+ connector->funcs->destroy(connector);
+
+ pm_runtime_disable(dev);
+ release_firmware(dp->fw);
+ kfree(dp->edid);
+ dp->edid = NULL;
+}
+
+static const struct component_ops cdn_dp_component_ops = {
+ .bind = cdn_dp_bind,
+ .unbind = cdn_dp_unbind,
+};
+
+int cdn_dp_suspend(struct device *dev)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+ if (dp->active)
+ ret = cdn_dp_disable(dp);
+ dp->suspended = true;
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+int cdn_dp_resume(struct device *dev)
+{
+ struct cdn_dp_device *dp = dev_get_drvdata(dev);
+
+ mutex_lock(&dp->lock);
+ dp->suspended = false;
+ if (dp->fw_loaded)
+ schedule_work(&dp->event_work);
+ mutex_unlock(&dp->lock);
+
+ return 0;
+}
+
+static int cdn_dp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ struct cdn_dp_data *dp_data;
+ struct cdn_dp_port *port;
+ struct cdn_dp_device *dp;
+ struct extcon_dev *extcon;
+ struct phy *phy;
+ int i;
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+ dp->dev = dev;
+
+ match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
+ dp_data = (struct cdn_dp_data *)match->data;
+
+ for (i = 0; i < dp_data->max_phy; i++) {
+ extcon = extcon_get_edev_by_phandle(dev, i);
+ phy = devm_of_phy_get_by_index(dev, dev->of_node, i);
+
+ if (PTR_ERR(extcon) == -EPROBE_DEFER ||
+ PTR_ERR(phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(extcon) || IS_ERR(phy))
+ continue;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->extcon = extcon;
+ port->phy = phy;
+ port->dp = dp;
+ port->id = i;
+ dp->port[dp->ports++] = port;
+ }
+
+ if (!dp->ports) {
+ DRM_DEV_ERROR(dev, "missing extcon or phy\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&dp->lock);
+ dev_set_drvdata(dev, dp);
+
+ return component_add(dev, &cdn_dp_component_ops);
+}
+
+static int cdn_dp_remove(struct platform_device *pdev)
+{
+ struct cdn_dp_device *dp = platform_get_drvdata(pdev);
+
+ cdn_dp_suspend(dp->dev);
+ component_del(&pdev->dev, &cdn_dp_component_ops);
+
+ return 0;
+}
+
+static void cdn_dp_shutdown(struct platform_device *pdev)
+{
+ struct cdn_dp_device *dp = platform_get_drvdata(pdev);
+
+ cdn_dp_suspend(dp->dev);
+}
+
+static const struct dev_pm_ops cdn_dp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
+ cdn_dp_resume)
+};
+
+static struct platform_driver cdn_dp_driver = {
+ .probe = cdn_dp_probe,
+ .remove = cdn_dp_remove,
+ .shutdown = cdn_dp_shutdown,
+ .driver = {
+ .name = "cdn-dp",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cdn_dp_dt_ids),
+ .pm = &cdn_dp_pm_ops,
+ },
+};
+
+module_platform_driver(cdn_dp_driver);
+
+MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_DESCRIPTION("cdn DP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
new file mode 100644
index 000000000000..f57e296401b8
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Chris Zhong <zyw@rock-chips.com>
+ * Copyright (C) 2016 ROCKCHIP, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CDN_DP_CORE_H
+#define _CDN_DP_CORE_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+#include "rockchip_drm_drv.h"
+
+#define MAX_PHY 2
+
+enum audio_format {
+ AFMT_I2S = 0,
+ AFMT_SPDIF = 1,
+ AFMT_UNUSED,
+};
+
+struct audio_info {
+ enum audio_format format;
+ int sample_rate;
+ int channels;
+ int sample_width;
+};
+
+enum vic_pxl_encoding_format {
+ PXL_RGB = 0x1,
+ YCBCR_4_4_4 = 0x2,
+ YCBCR_4_2_2 = 0x4,
+ YCBCR_4_2_0 = 0x8,
+ Y_ONLY = 0x10,
+};
+
+struct video_info {
+ bool h_sync_polarity;
+ bool v_sync_polarity;
+ bool interlaced;
+ int color_depth;
+ enum vic_pxl_encoding_format color_fmt;
+};
+
+struct cdn_firmware_header {
+ u32 size_bytes; /* size of the entire header+image(s) in bytes */
+ u32 header_size; /* size of just the header in bytes */
+ u32 iram_size; /* size of iram */
+ u32 dram_size; /* size of dram */
+};
+
+struct cdn_dp_port {
+ struct cdn_dp_device *dp;
+ struct notifier_block event_nb;
+ struct extcon_dev *extcon;
+ struct phy *phy;
+ u8 lanes;
+ bool phy_enabled;
+ u8 id;
+};
+
+struct cdn_dp_device {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct drm_display_mode mode;
+ struct platform_device *audio_pdev;
+ struct work_struct event_work;
+ struct edid *edid;
+
+ struct mutex lock;
+ bool connected;
+ bool active;
+ bool suspended;
+
+ const struct firmware *fw; /* cdn dp firmware */
+ unsigned int fw_version; /* cdn fw version */
+ bool fw_loaded;
+
+ void __iomem *regs;
+ struct regmap *grf;
+ struct clk *core_clk;
+ struct clk *pclk;
+ struct clk *spdif_clk;
+ struct clk *grf_clk;
+ struct reset_control *spdif_rst;
+ struct reset_control *dptx_rst;
+ struct reset_control *apb_rst;
+ struct reset_control *core_rst;
+ struct audio_info audio_info;
+ struct video_info video_info;
+ struct drm_dp_link link;
+ struct cdn_dp_port *port[MAX_PHY];
+ u8 ports;
+ u8 lanes;
+ int active_port;
+
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ bool sink_has_audio;
+};
+#endif /* _CDN_DP_CORE_H */
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
new file mode 100644
index 000000000000..319dbbaa3609
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -0,0 +1,979 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "cdn-dp-core.h"
+#include "cdn-dp-reg.h"
+
+#define CDN_DP_SPDIF_CLK 200000000
+#define FW_ALIVE_TIMEOUT_US 1000000
+#define MAILBOX_RETRY_US 1000
+#define MAILBOX_TIMEOUT_US 5000000
+#define LINK_TRAINING_RETRY_MS 20
+#define LINK_TRAINING_TIMEOUT_MS 500
+
+void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, u32 clk)
+{
+ writel(clk / 1000000, dp->regs + SW_CLK_H);
+}
+
+void cdn_dp_clock_reset(struct cdn_dp_device *dp)
+{
+ u32 val;
+
+ val = DPTX_FRMR_DATA_CLK_RSTN_EN |
+ DPTX_FRMR_DATA_CLK_EN |
+ DPTX_PHY_DATA_RSTN_EN |
+ DPTX_PHY_DATA_CLK_EN |
+ DPTX_PHY_CHAR_RSTN_EN |
+ DPTX_PHY_CHAR_CLK_EN |
+ SOURCE_AUX_SYS_CLK_RSTN_EN |
+ SOURCE_AUX_SYS_CLK_EN |
+ DPTX_SYS_CLK_RSTN_EN |
+ DPTX_SYS_CLK_EN |
+ CFG_DPTX_VIF_CLK_RSTN_EN |
+ CFG_DPTX_VIF_CLK_EN;
+ writel(val, dp->regs + SOURCE_DPTX_CAR);
+
+ val = SOURCE_PHY_RSTN_EN | SOURCE_PHY_CLK_EN;
+ writel(val, dp->regs + SOURCE_PHY_CAR);
+
+ val = SOURCE_PKT_SYS_RSTN_EN |
+ SOURCE_PKT_SYS_CLK_EN |
+ SOURCE_PKT_DATA_RSTN_EN |
+ SOURCE_PKT_DATA_CLK_EN;
+ writel(val, dp->regs + SOURCE_PKT_CAR);
+
+ val = SPDIF_CDR_CLK_RSTN_EN |
+ SPDIF_CDR_CLK_EN |
+ SOURCE_AIF_SYS_RSTN_EN |
+ SOURCE_AIF_SYS_CLK_EN |
+ SOURCE_AIF_CLK_RSTN_EN |
+ SOURCE_AIF_CLK_EN;
+ writel(val, dp->regs + SOURCE_AIF_CAR);
+
+ val = SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN |
+ SOURCE_CIPHER_SYS_CLK_EN |
+ SOURCE_CIPHER_CHAR_CLK_RSTN_EN |
+ SOURCE_CIPHER_CHAR_CLK_EN;
+ writel(val, dp->regs + SOURCE_CIPHER_CAR);
+
+ val = SOURCE_CRYPTO_SYS_CLK_RSTN_EN |
+ SOURCE_CRYPTO_SYS_CLK_EN;
+ writel(val, dp->regs + SOURCE_CRYPTO_CAR);
+
+ /* enable Mailbox and PIF interrupt */
+ writel(0, dp->regs + APB_INT_MASK);
+}
+
+static int cdn_dp_mailbox_read(struct cdn_dp_device *dp)
+{
+ int val, ret;
+
+ ret = readx_poll_timeout(readl, dp->regs + MAILBOX_EMPTY_ADDR,
+ val, !val, MAILBOX_RETRY_US,
+ MAILBOX_TIMEOUT_US);
+ if (ret < 0)
+ return ret;
+
+ return readl(dp->regs + MAILBOX0_RD_DATA) & 0xff;
+}
+
+static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val)
+{
+ int ret, full;
+
+ ret = readx_poll_timeout(readl, dp->regs + MAILBOX_FULL_ADDR,
+ full, !full, MAILBOX_RETRY_US,
+ MAILBOX_TIMEOUT_US);
+ if (ret < 0)
+ return ret;
+
+ writel(val, dp->regs + MAILBOX0_WR_DATA);
+
+ return 0;
+}
+
+static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
+ u8 module_id, u8 opcode,
+ u8 req_size)
+{
+ u32 mbox_size, i;
+ u8 header[4];
+ int ret;
+
+ /* read the header of the message */
+ for (i = 0; i < 4; i++) {
+ ret = cdn_dp_mailbox_read(dp);
+ if (ret < 0)
+ return ret;
+
+ header[i] = ret;
+ }
+
+ mbox_size = (header[2] << 8) | header[3];
+
+ if (opcode != header[0] || module_id != header[1] ||
+ req_size != mbox_size) {
+ /*
+ * If the message in mailbox is not what we want, we need to
+ * clear the mailbox by reading its contents.
+ */
+ for (i = 0; i < mbox_size; i++)
+ if (cdn_dp_mailbox_read(dp) < 0)
+ break;
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
+ u8 *buff, u8 buff_size)
+{
+ u32 i;
+ int ret;
+
+ for (i = 0; i < buff_size; i++) {
+ ret = cdn_dp_mailbox_read(dp);
+ if (ret < 0)
+ return ret;
+
+ buff[i] = ret;
+ }
+
+ return 0;
+}
+
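+/*
+ * Every mailbox transaction is a 4-byte header (opcode, module id, payload
+ * size MSB, payload size LSB) followed by the payload, written one byte at
+ * a time. As an illustration, a DPTX_READ_DPCD request for one byte at
+ * DPCD address 0x600 goes out as:
+ *
+ * header: 0x03 0x01 0x00 0x05
+ * payload: 0x00 0x01 0x00 0x06 0x00 (len = 1, addr = 0x000600)
+ *
+ * The reply is checked by cdn_dp_mailbox_validate_receive() before its
+ * data bytes are read out with cdn_dp_mailbox_read_receive().
+ */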
+static int cdn_dp_mailbox_send(struct cdn_dp_device *dp, u8 module_id,
+ u8 opcode, u16 size, u8 *message)
+{
+ u8 header[4];
+ int ret, i;
+
+ header[0] = opcode;
+ header[1] = module_id;
+ header[2] = (size >> 8) & 0xff;
+ header[3] = size & 0xff;
+
+ for (i = 0; i < 4; i++) {
+ ret = cdp_dp_mailbox_write(dp, header[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < size; i++) {
+ ret = cdp_dp_mailbox_write(dp, message[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val)
+{
+ u8 msg[6];
+
+ msg[0] = (addr >> 8) & 0xff;
+ msg[1] = addr & 0xff;
+ msg[2] = (val >> 24) & 0xff;
+ msg[3] = (val >> 16) & 0xff;
+ msg[4] = (val >> 8) & 0xff;
+ msg[5] = val & 0xff;
+ return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_REGISTER,
+ sizeof(msg), msg);
+}
+
+static int cdn_dp_reg_write_bit(struct cdn_dp_device *dp, u16 addr,
+ u8 start_bit, u8 bits_no, u32 val)
+{
+ u8 field[8];
+
+ field[0] = (addr >> 8) & 0xff;
+ field[1] = addr & 0xff;
+ field[2] = start_bit;
+ field[3] = bits_no;
+ field[4] = (val >> 24) & 0xff;
+ field[5] = (val >> 16) & 0xff;
+ field[6] = (val >> 8) & 0xff;
+ field[7] = val & 0xff;
+
+ return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_FIELD,
+ sizeof(field), field);
+}
+
+int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
+{
+ u8 msg[5], reg[5];
+ int ret;
+
+ msg[0] = (len >> 8) & 0xff;
+ msg[1] = len & 0xff;
+ msg[2] = (addr >> 16) & 0xff;
+ msg[3] = (addr >> 8) & 0xff;
+ msg[4] = addr & 0xff;
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD,
+ sizeof(msg), msg);
+ if (ret)
+ goto err_dpcd_read;
+
+ ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_DPCD,
+ sizeof(reg) + len);
+ if (ret)
+ goto err_dpcd_read;
+
+ ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+ if (ret)
+ goto err_dpcd_read;
+
+ ret = cdn_dp_mailbox_read_receive(dp, data, len);
+
+err_dpcd_read:
+ return ret;
+}
+
+int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value)
+{
+ u8 msg[6], reg[5];
+ int ret;
+
+ msg[0] = 0;
+ msg[1] = 1;
+ msg[2] = (addr >> 16) & 0xff;
+ msg[3] = (addr >> 8) & 0xff;
+ msg[4] = addr & 0xff;
+ msg[5] = value;
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD,
+ sizeof(msg), msg);
+ if (ret)
+ goto err_dpcd_write;
+
+ ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+ DPTX_WRITE_DPCD, sizeof(reg));
+ if (ret)
+ goto err_dpcd_write;
+
+ ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+ if (ret)
+ goto err_dpcd_write;
+
+ if (addr != (reg[2] << 16 | reg[3] << 8 | reg[4]))
+ ret = -EINVAL;
+
+err_dpcd_write:
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "dpcd write failed: %d\n", ret);
+ return ret;
+}
+
+int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
+ u32 i_size, const u32 *d_mem, u32 d_size)
+{
+ u32 reg;
+ int i, ret;
+
+ /* reset ucpu before loading the firmware */
+ writel(APB_IRAM_PATH | APB_DRAM_PATH | APB_XT_RESET,
+ dp->regs + APB_CTRL);
+
+ for (i = 0; i < i_size; i += 4)
+ writel(*i_mem++, dp->regs + ADDR_IMEM + i);
+
+ for (i = 0; i < d_size; i += 4)
+ writel(*d_mem++, dp->regs + ADDR_DMEM + i);
+
+ /* un-reset ucpu */
+ writel(0, dp->regs + APB_CTRL);
+
+ /* check the keep alive register to make sure fw working */
+ ret = readx_poll_timeout(readl, dp->regs + KEEP_ALIVE,
+ reg, reg, 2000, FW_ALIVE_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "failed to loaded the FW reg = %x\n",
+ reg);
+ return -EINVAL;
+ }
+
+ reg = readl(dp->regs + VER_L) & 0xff;
+ dp->fw_version = reg;
+ reg = readl(dp->regs + VER_H) & 0xff;
+ dp->fw_version |= reg << 8;
+ reg = readl(dp->regs + VER_LIB_L_ADDR) & 0xff;
+ dp->fw_version |= reg << 16;
+ reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff;
+ dp->fw_version |= reg << 24;
+
+ dev_dbg(dp->dev, "firmware version: %x\n", dp->fw_version);
+
+ return 0;
+}
+
+int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable)
+{
+ u8 msg[5];
+ int ret, i;
+
+ msg[0] = GENERAL_MAIN_CONTROL;
+ msg[1] = MB_MODULE_ID_GENERAL;
+ msg[2] = 0;
+ msg[3] = 1;
+ msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
+
+ for (i = 0; i < sizeof(msg); i++) {
+ ret = cdp_dp_mailbox_write(dp, msg[i]);
+ if (ret)
+ goto err_set_firmware_active;
+ }
+
+ /* read the firmware state */
+ for (i = 0; i < sizeof(msg); i++) {
+ ret = cdn_dp_mailbox_read(dp);
+ if (ret < 0)
+ goto err_set_firmware_active;
+
+ msg[i] = ret;
+ }
+
+ ret = 0;
+
+err_set_firmware_active:
+ if (ret < 0)
+ DRM_DEV_ERROR(dp->dev, "set firmware active failed\n");
+ return ret;
+}
+
+int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip)
+{
+ u8 msg[8];
+ int ret;
+
+ msg[0] = CDN_DP_MAX_LINK_RATE;
+ msg[1] = lanes | SCRAMBLER_EN;
+ msg[2] = VOLTAGE_LEVEL_2;
+ msg[3] = PRE_EMPHASIS_LEVEL_3;
+ msg[4] = PTS1 | PTS2 | PTS3 | PTS4;
+ msg[5] = FAST_LT_NOT_SUPPORT;
+ msg[6] = flip ? LANE_MAPPING_FLIPPED : LANE_MAPPING_NORMAL;
+ msg[7] = ENHANCED;
+
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
+ DPTX_SET_HOST_CAPABILITIES,
+ sizeof(msg), msg);
+ if (ret)
+ goto err_set_host_cap;
+
+ ret = cdn_dp_reg_write(dp, DP_AUX_SWAP_INVERSION_CONTROL,
+ AUX_HOST_INVERT);
+
+err_set_host_cap:
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "set host cap failed: %d\n", ret);
+ return ret;
+}
+
+int cdn_dp_event_config(struct cdn_dp_device *dp)
+{
+ u8 msg[5];
+ int ret;
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0] = DPTX_EVENT_ENABLE_HPD | DPTX_EVENT_ENABLE_TRAINING;
+
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_ENABLE_EVENT,
+ sizeof(msg), msg);
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "set event config failed: %d\n", ret);
+
+ return ret;
+}
+
+u32 cdn_dp_get_event(struct cdn_dp_device *dp)
+{
+ return readl(dp->regs + SW_EVENTS0);
+}
+
+int cdn_dp_get_hpd_status(struct cdn_dp_device *dp)
+{
+ u8 status;
+ int ret;
+
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE,
+ 0, NULL);
+ if (ret)
+ goto err_get_hpd;
+
+ ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+ DPTX_HPD_STATE, sizeof(status));
+ if (ret)
+ goto err_get_hpd;
+
+ ret = cdn_dp_mailbox_read_receive(dp, &status, sizeof(status));
+ if (ret)
+ goto err_get_hpd;
+
+ return status;
+
+err_get_hpd:
+ DRM_DEV_ERROR(dp->dev, "get hpd status failed: %d\n", ret);
+ return ret;
+}
+
+int cdn_dp_get_edid_block(void *data, u8 *edid,
+ unsigned int block, size_t length)
+{
+ struct cdn_dp_device *dp = data;
+ u8 msg[2], reg[2], i;
+ int ret;
+
+ for (i = 0; i < 4; i++) {
+ msg[0] = block / 2;
+ msg[1] = block % 2;
+
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID,
+ sizeof(msg), msg);
+ if (ret)
+ continue;
+
+ ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+ DPTX_GET_EDID,
+ sizeof(reg) + length);
+ if (ret)
+ continue;
+
+ ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg));
+ if (ret)
+ continue;
+
+ ret = cdn_dp_mailbox_read_receive(dp, edid, length);
+ if (ret)
+ continue;
+
+ if (reg[0] == length && reg[1] == block / 2)
+ break;
+ }
+
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "get block[%d] edid failed: %d\n", block,
+ ret);
+
+ return ret;
+}
+
+static int cdn_dp_training_start(struct cdn_dp_device *dp)
+{
+ unsigned long timeout;
+ u8 msg, event[2];
+ int ret;
+
+ msg = LINK_TRAINING_RUN;
+
+ /* start training */
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_TRAINING_CONTROL,
+ sizeof(msg), &msg);
+ if (ret)
+ goto err_training_start;
+
+ timeout = jiffies + msecs_to_jiffies(LINK_TRAINING_TIMEOUT_MS);
+ while (time_before(jiffies, timeout)) {
+ msleep(LINK_TRAINING_RETRY_MS);
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_EVENT, 0, NULL);
+ if (ret)
+ goto err_training_start;
+
+ ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_EVENT,
+ sizeof(event));
+ if (ret)
+ goto err_training_start;
+
+ ret = cdn_dp_mailbox_read_receive(dp, event, sizeof(event));
+ if (ret)
+ goto err_training_start;
+
+ if (event[1] & EQ_PHASE_FINISHED)
+ return 0;
+ }
+
+ ret = -ETIMEDOUT;
+
+err_training_start:
+ DRM_DEV_ERROR(dp->dev, "training failed: %d\n", ret);
+ return ret;
+}
+
+static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
+{
+ u8 status[10];
+ int ret;
+
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_LINK_STAT,
+ 0, NULL);
+ if (ret)
+ goto err_get_training_status;
+
+ ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_LINK_STAT,
+ sizeof(status));
+ if (ret)
+ goto err_get_training_status;
+
+ ret = cdn_dp_mailbox_read_receive(dp, status, sizeof(status));
+ if (ret)
+ goto err_get_training_status;
+
+ dp->link.rate = status[0];
+ dp->link.num_lanes = status[1];
+
+err_get_training_status:
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "get training status failed: %d\n", ret);
+ return ret;
+}
+
+int cdn_dp_train_link(struct cdn_dp_device *dp)
+{
+ int ret;
+
+ ret = cdn_dp_training_start(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to start training %d\n", ret);
+ return ret;
+ }
+
+ ret = cdn_dp_get_training_status(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to get training stat %d\n", ret);
+ return ret;
+ }
+
+ DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
+ dp->link.num_lanes);
+ return ret;
+}
+
+int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active)
+{
+ u8 msg;
+ int ret;
+
+ msg = !!active;
+
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_SET_VIDEO,
+ sizeof(msg), &msg);
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "set video status failed: %d\n", ret);
+
+ return ret;
+}
+
+static int cdn_dp_get_msa_misc(struct video_info *video,
+ struct drm_display_mode *mode)
+{
+ u32 msa_misc;
+ u8 val[2] = {0};
+
+ switch (video->color_fmt) {
+ case PXL_RGB:
+ case Y_ONLY:
+ val[0] = 0;
+ break;
+ /* set YUV default color space conversion to BT601 */
+ case YCBCR_4_4_4:
+ val[0] = 6 + BT_601 * 8;
+ break;
+ case YCBCR_4_2_2:
+ val[0] = 5 + BT_601 * 8;
+ break;
+ case YCBCR_4_2_0:
+ val[0] = 5;
+ break;
+ }
+
+ switch (video->color_depth) {
+ case 6:
+ val[1] = 0;
+ break;
+ case 8:
+ val[1] = 1;
+ break;
+ case 10:
+ val[1] = 2;
+ break;
+ case 12:
+ val[1] = 3;
+ break;
+ case 16:
+ val[1] = 4;
+ break;
+ }
+
+ msa_misc = 2 * val[0] + 32 * val[1] +
+ ((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
+
+ return msa_misc;
+}
+
+int cdn_dp_config_video(struct cdn_dp_device *dp)
+{
+ struct video_info *video = &dp->video_info;
+ struct drm_display_mode *mode = &dp->mode;
+ u64 symbol;
+ u32 val, link_rate, rem;
+ u8 bit_per_pix, tu_size_reg = TU_SIZE;
+ int ret;
+
+ bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
+ (video->color_depth * 2) : (video->color_depth * 3);
+
+ link_rate = drm_dp_bw_code_to_link_rate(dp->link.rate) / 1000;
+
+ ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
+ if (ret)
+ goto err_config_video;
+
+ ret = cdn_dp_reg_write(dp, HSYNC2VSYNC_POL_CTRL, 0);
+ if (ret)
+ goto err_config_video;
+
+ /*
+ * Find the best tu_size and number of valid symbols (VS):
+ * 1. choose an Lclk freq (162MHz, 270MHz or 540MHz) and set TU to 32
+ * 2. calculate VS (valid symbols) = TU * Pclk * Bpp / (Lclk * Lanes)
+ * 3. if the fractional part of VS is > 0.85 or < 0.1, or VS < 2, or
+ * TU < VS + 4, then set TU += 2 and repeat step 2.
+ */
+ do {
+ tu_size_reg += 2;
+ symbol = tu_size_reg * mode->clock * bit_per_pix;
+ do_div(symbol, dp->link.num_lanes * link_rate * 8);
+ rem = do_div(symbol, 1000);
+ if (tu_size_reg > 64) {
+ ret = -EINVAL;
+ goto err_config_video;
+ }
+ } while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
+ (rem > 850) || (rem < 100));
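+ /*
+ * Worked example (illustrative numbers): 1080p60 (Pclk = 148500 kHz),
+ * 24 bpp over 4 lanes at 5.4Gbps (link_rate = 540) gives, on the
+ * first pass with tu_size_reg = 32,
+ * symbol = 32 * 148500 * 24 / (4 * 540 * 8) = 6600,
+ * so symbol = 6 and rem = 600 after the divisions, i.e. VS = 6.6,
+ * which passes all four checks and ends the loop.
+ */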
+
+ val = symbol + (tu_size_reg << 8);
+ val |= TU_CNT_RST_EN;
+ ret = cdn_dp_reg_write(dp, DP_FRAMER_TU, val);
+ if (ret)
+ goto err_config_video;
+
+ /* set the FIFO Buffer size */
+ val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
+ val /= (dp->link.num_lanes * link_rate);
+ val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
+ val += 2;
+ ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
+
+ switch (video->color_depth) {
+ case 6:
+ val = BCS_6;
+ break;
+ case 8:
+ val = BCS_8;
+ break;
+ case 10:
+ val = BCS_10;
+ break;
+ case 12:
+ val = BCS_12;
+ break;
+ case 16:
+ val = BCS_16;
+ break;
+ }
+
+ val += video->color_fmt << 8;
+ ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
+ if (ret)
+ goto err_config_video;
+
+ val = video->h_sync_polarity ? DP_FRAMER_SP_HSP : 0;
+ val |= video->v_sync_polarity ? DP_FRAMER_SP_VSP : 0;
+ ret = cdn_dp_reg_write(dp, DP_FRAMER_SP, val);
+ if (ret)
+ goto err_config_video;
+
+ val = (mode->hsync_start - mode->hdisplay) << 16;
+ val |= mode->htotal - mode->hsync_end;
+ ret = cdn_dp_reg_write(dp, DP_FRONT_BACK_PORCH, val);
+ if (ret)
+ goto err_config_video;
+
+ val = mode->hdisplay * bit_per_pix / 8;
+ ret = cdn_dp_reg_write(dp, DP_BYTE_COUNT, val);
+ if (ret)
+ goto err_config_video;
+
+ val = mode->htotal | ((mode->htotal - mode->hsync_start) << 16);
+ ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_0, val);
+ if (ret)
+ goto err_config_video;
+
+ val = mode->hsync_end - mode->hsync_start;
+ val |= (mode->hdisplay << 16) | (video->h_sync_polarity << 15);
+ ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_1, val);
+ if (ret)
+ goto err_config_video;
+
+ val = mode->vtotal;
+ val |= (mode->vtotal - mode->vsync_start) << 16;
+ ret = cdn_dp_reg_write(dp, MSA_VERTICAL_0, val);
+ if (ret)
+ goto err_config_video;
+
+ val = mode->vsync_end - mode->vsync_start;
+ val |= (mode->vdisplay << 16) | (video->v_sync_polarity << 15);
+ ret = cdn_dp_reg_write(dp, MSA_VERTICAL_1, val);
+ if (ret)
+ goto err_config_video;
+
+ val = cdn_dp_get_msa_misc(video, mode);
+ ret = cdn_dp_reg_write(dp, MSA_MISC, val);
+ if (ret)
+ goto err_config_video;
+
+ ret = cdn_dp_reg_write(dp, STREAM_CONFIG, 1);
+ if (ret)
+ goto err_config_video;
+
+ val = mode->hsync_end - mode->hsync_start;
+ val |= mode->hdisplay << 16;
+ ret = cdn_dp_reg_write(dp, DP_HORIZONTAL, val);
+ if (ret)
+ goto err_config_video;
+
+ val = mode->vdisplay;
+ val |= (mode->vtotal - mode->vsync_start) << 16;
+ ret = cdn_dp_reg_write(dp, DP_VERTICAL_0, val);
+ if (ret)
+ goto err_config_video;
+
+ val = mode->vtotal;
+ ret = cdn_dp_reg_write(dp, DP_VERTICAL_1, val);
+ if (ret)
+ goto err_config_video;
+
+ ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 2, 1, 0);
+
+err_config_video:
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "config video failed: %d\n", ret);
+ return ret;
+}
+
+int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
+{
+ u32 val;
+ int ret;
+
+ ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "audio stop failed: %d\n", ret);
+ return ret;
+ }
+
+ val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
+ val |= SPDIF_FIFO_MID_RANGE(0xe0);
+ val |= SPDIF_JITTER_THRSH(0xe0);
+ val |= SPDIF_JITTER_AVG_WIN(7);
+ writel(val, dp->regs + SPDIF_CTRL_ADDR);
+
+ /* clear the audio config and reset */
+ writel(0, dp->regs + AUDIO_SRC_CNTL);
+ writel(0, dp->regs + AUDIO_SRC_CNFG);
+ writel(AUDIO_SW_RST, dp->regs + AUDIO_SRC_CNTL);
+ writel(0, dp->regs + AUDIO_SRC_CNTL);
+
+ /* reset smpl2pckt component */
+ writel(0, dp->regs + SMPL2PKT_CNTL);
+ writel(AUDIO_SW_RST, dp->regs + SMPL2PKT_CNTL);
+ writel(0, dp->regs + SMPL2PKT_CNTL);
+
+ /* reset FIFO */
+ writel(AUDIO_SW_RST, dp->regs + FIFO_CNTL);
+ writel(0, dp->regs + FIFO_CNTL);
+
+ if (audio->format == AFMT_SPDIF)
+ clk_disable_unprepare(dp->spdif_clk);
+
+ return 0;
+}
+
+int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable)
+{
+ int ret;
+
+ ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 4, 1, enable);
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "audio mute failed: %d\n", ret);
+
+ return ret;
+}
+
+static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
+ struct audio_info *audio)
+{
+ int sub_pckt_num = 1, i2s_port_en_val = 0xf, i;
+ u32 val;
+
+ if (audio->channels == 2) {
+ if (dp->link.num_lanes == 1)
+ sub_pckt_num = 2;
+ else
+ sub_pckt_num = 4;
+
+ i2s_port_en_val = 1;
+ } else if (audio->channels == 4) {
+ i2s_port_en_val = 3;
+ }
+
+ writel(0x0, dp->regs + SPDIF_CTRL_ADDR);
+
+ writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
+
+ val = MAX_NUM_CH(audio->channels);
+ val |= NUM_OF_I2S_PORTS(audio->channels);
+ val |= AUDIO_TYPE_LPCM;
+ val |= CFG_SUB_PCKT_NUM(sub_pckt_num);
+ writel(val, dp->regs + SMPL2PKT_CNFG);
+
+ if (audio->sample_width == 16)
+ val = 0;
+ else if (audio->sample_width == 24)
+ val = 1 << 9;
+ else
+ val = 2 << 9;
+
+ val |= AUDIO_CH_NUM(audio->channels);
+ val |= I2S_DEC_PORT_EN(i2s_port_en_val);
+ val |= TRANS_SMPL_WIDTH_32;
+ writel(val, dp->regs + AUDIO_SRC_CNFG);
+
+ for (i = 0; i < (audio->channels + 1) / 2; i++) {
+ if (audio->sample_width == 16)
+ val = (0x02 << 8) | (0x02 << 20);
+ else if (audio->sample_width == 24)
+ val = (0x0b << 8) | (0x0b << 20);
+ else
+ val = 0; /* don't use val uninitialized for other widths */
+
+ val |= ((2 * i) << 4) | ((2 * i + 1) << 16);
+ writel(val, dp->regs + STTS_BIT_CH(i));
+ }
+
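+ /*
+ * These look to be the IEC 60958 channel-status sampling-frequency
+ * codes; ORIGINAL_SAMP_FREQ() carries the bitwise complement of the
+ * SAMPLING_FREQ() code, matching the spec's "original sampling
+ * frequency" field.
+ */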
+ switch (audio->sample_rate) {
+ case 32000:
+ val = SAMPLING_FREQ(3) |
+ ORIGINAL_SAMP_FREQ(0xc);
+ break;
+ case 44100:
+ val = SAMPLING_FREQ(0) |
+ ORIGINAL_SAMP_FREQ(0xf);
+ break;
+ case 48000:
+ val = SAMPLING_FREQ(2) |
+ ORIGINAL_SAMP_FREQ(0xd);
+ break;
+ case 88200:
+ val = SAMPLING_FREQ(8) |
+ ORIGINAL_SAMP_FREQ(0x7);
+ break;
+ case 96000:
+ val = SAMPLING_FREQ(0xa) |
+ ORIGINAL_SAMP_FREQ(5);
+ break;
+ case 176400:
+ val = SAMPLING_FREQ(0xc) |
+ ORIGINAL_SAMP_FREQ(3);
+ break;
+ case 192000:
+ val = SAMPLING_FREQ(0xe) |
+ ORIGINAL_SAMP_FREQ(1);
+ break;
+ }
+ val |= 4;
+ writel(val, dp->regs + COM_CH_STTS_BITS);
+
+ writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
+ writel(I2S_DEC_START, dp->regs + AUDIO_SRC_CNTL);
+}
+
+static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
+{
+ u32 val;
+
+ val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
+ val |= SPDIF_FIFO_MID_RANGE(0xe0);
+ val |= SPDIF_JITTER_THRSH(0xe0);
+ val |= SPDIF_JITTER_AVG_WIN(7);
+ writel(val, dp->regs + SPDIF_CTRL_ADDR);
+
+ writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
+
+ val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
+ writel(val, dp->regs + SMPL2PKT_CNFG);
+ writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
+
+ val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
+ val |= SPDIF_FIFO_MID_RANGE(0xe0);
+ val |= SPDIF_JITTER_THRSH(0xe0);
+ val |= SPDIF_JITTER_AVG_WIN(7);
+ writel(val, dp->regs + SPDIF_CTRL_ADDR);
+
+ clk_prepare_enable(dp->spdif_clk);
+ clk_set_rate(dp->spdif_clk, CDN_DP_SPDIF_CLK);
+}
+
+int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio)
+{
+ int ret;
+
+ /* reset the spdif clk before config */
+ if (audio->format == AFMT_SPDIF) {
+ reset_control_assert(dp->spdif_rst);
+ reset_control_deassert(dp->spdif_rst);
+ }
+
+ ret = cdn_dp_reg_write(dp, CM_LANE_CTRL, LANE_REF_CYC);
+ if (ret)
+ goto err_audio_config;
+
+ ret = cdn_dp_reg_write(dp, CM_CTRL, 0);
+ if (ret)
+ goto err_audio_config;
+
+ if (audio->format == AFMT_I2S)
+ cdn_dp_audio_config_i2s(dp, audio);
+ else if (audio->format == AFMT_SPDIF)
+ cdn_dp_audio_config_spdif(dp);
+
+ ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN);
+
+err_audio_config:
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "audio config failed: %d\n", ret);
+ return ret;
+}
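
The mailbox framing and the TU search above are easy to model outside the kernel. Below is a minimal, self-contained userspace sketch (not part of this patch; all names are local to the sketch) that packs a frame the way cdn_dp_mailbox_send() does and runs the same valid-symbol search as cdn_dp_config_video():

	#include <stdint.h>
	#include <stdio.h>

	/* 4-byte header (opcode, module, size MSB/LSB) + payload */
	static size_t pack_mailbox(uint8_t *buf, uint8_t module_id,
				   uint8_t opcode, const uint8_t *payload,
				   uint16_t size)
	{
		buf[0] = opcode;
		buf[1] = module_id;
		buf[2] = (size >> 8) & 0xff;
		buf[3] = size & 0xff;
		for (uint16_t i = 0; i < size; i++)
			buf[4 + i] = payload[i];
		return 4 + (size_t)size;
	}

	/* mirrors the do/while loop in cdn_dp_config_video() */
	static int find_tu(unsigned int clock_khz, unsigned int bpp,
			   unsigned int lanes, unsigned int link_rate,
			   unsigned int *tu, unsigned int *vs,
			   unsigned int *rem)
	{
		unsigned int tu_size = 30, r;
		uint64_t symbol;

		do {
			tu_size += 2;
			if (tu_size > 64)
				return -1;
			symbol = (uint64_t)tu_size * clock_khz * bpp;
			symbol /= lanes * link_rate * 8;
			r = symbol % 1000;	/* fractional part, in 1/1000 */
			symbol /= 1000;		/* integer part of VS */
		} while (symbol <= 1 || tu_size - symbol < 4 ||
			 r > 850 || r < 100);

		*tu = tu_size;
		*vs = (unsigned int)symbol;
		*rem = r;
		return 0;
	}

	int main(void)
	{
		/* DPTX_READ_DPCD, len = 1, addr = 0x000600 */
		const uint8_t payload[] = { 0x00, 0x01, 0x00, 0x06, 0x00 };
		uint8_t frame[16];
		size_t i, n;
		unsigned int tu, vs, rem;

		n = pack_mailbox(frame, 0x01, 0x03, payload, sizeof(payload));
		for (i = 0; i < n; i++)
			printf("%02x ", frame[i]);
		printf("\n");	/* 03 01 00 05 00 01 00 06 00 */

		if (!find_tu(148500, 24, 4, 540, &tu, &vs, &rem))
			printf("tu=%u vs=%u.%03u\n", tu, vs, rem); /* tu=32 vs=6.600 */
		return 0;
	}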
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
new file mode 100644
index 000000000000..b5f215324694
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CDN_DP_REG_H
+#define _CDN_DP_REG_H
+
+#include <linux/bitops.h>
+
+#define ADDR_IMEM 0x10000
+#define ADDR_DMEM 0x20000
+
+/* APB CFG addr */
+#define APB_CTRL 0
+#define XT_INT_CTRL 0x04
+#define MAILBOX_FULL_ADDR 0x08
+#define MAILBOX_EMPTY_ADDR 0x0c
+#define MAILBOX0_WR_DATA 0x10
+#define MAILBOX0_RD_DATA 0x14
+#define KEEP_ALIVE 0x18
+#define VER_L 0x1c
+#define VER_H 0x20
+#define VER_LIB_L_ADDR 0x24
+#define VER_LIB_H_ADDR 0x28
+#define SW_DEBUG_L 0x2c
+#define SW_DEBUG_H 0x30
+#define MAILBOX_INT_MASK 0x34
+#define MAILBOX_INT_STATUS 0x38
+#define SW_CLK_L 0x3c
+#define SW_CLK_H 0x40
+#define SW_EVENTS0 0x44
+#define SW_EVENTS1 0x48
+#define SW_EVENTS2 0x4c
+#define SW_EVENTS3 0x50
+#define XT_OCD_CTRL 0x60
+#define APB_INT_MASK 0x6c
+#define APB_STATUS_MASK 0x70
+
+/* audio decoder addr */
+#define AUDIO_SRC_CNTL 0x30000
+#define AUDIO_SRC_CNFG 0x30004
+#define COM_CH_STTS_BITS 0x30008
+#define STTS_BIT_CH(x) (0x3000c + ((x) << 2))
+#define SPDIF_CTRL_ADDR 0x3004c
+#define SPDIF_CH1_CS_3100_ADDR 0x30050
+#define SPDIF_CH1_CS_6332_ADDR 0x30054
+#define SPDIF_CH1_CS_9564_ADDR 0x30058
+#define SPDIF_CH1_CS_12796_ADDR 0x3005c
+#define SPDIF_CH1_CS_159128_ADDR 0x30060
+#define SPDIF_CH1_CS_191160_ADDR 0x30064
+#define SPDIF_CH2_CS_3100_ADDR 0x30068
+#define SPDIF_CH2_CS_6332_ADDR 0x3006c
+#define SPDIF_CH2_CS_9564_ADDR 0x30070
+#define SPDIF_CH2_CS_12796_ADDR 0x30074
+#define SPDIF_CH2_CS_159128_ADDR 0x30078
+#define SPDIF_CH2_CS_191160_ADDR 0x3007c
+#define SMPL2PKT_CNTL 0x30080
+#define SMPL2PKT_CNFG 0x30084
+#define FIFO_CNTL 0x30088
+#define FIFO_STTS 0x3008c
+
+/* source pif addr */
+#define SOURCE_PIF_WR_ADDR 0x30800
+#define SOURCE_PIF_WR_REQ 0x30804
+#define SOURCE_PIF_RD_ADDR 0x30808
+#define SOURCE_PIF_RD_REQ 0x3080c
+#define SOURCE_PIF_DATA_WR 0x30810
+#define SOURCE_PIF_DATA_RD 0x30814
+#define SOURCE_PIF_FIFO1_FLUSH 0x30818
+#define SOURCE_PIF_FIFO2_FLUSH 0x3081c
+#define SOURCE_PIF_STATUS 0x30820
+#define SOURCE_PIF_INTERRUPT_SOURCE 0x30824
+#define SOURCE_PIF_INTERRUPT_MASK 0x30828
+#define SOURCE_PIF_PKT_ALLOC_REG 0x3082c
+#define SOURCE_PIF_PKT_ALLOC_WR_EN 0x30830
+#define SOURCE_PIF_SW_RESET 0x30834
+
+/* the registers below must be accessed via the mailbox */
+/* source car addr */
+#define SOURCE_HDTX_CAR 0x0900
+#define SOURCE_DPTX_CAR 0x0904
+#define SOURCE_PHY_CAR 0x0908
+#define SOURCE_CEC_CAR 0x090c
+#define SOURCE_CBUS_CAR 0x0910
+#define SOURCE_PKT_CAR 0x0918
+#define SOURCE_AIF_CAR 0x091c
+#define SOURCE_CIPHER_CAR 0x0920
+#define SOURCE_CRYPTO_CAR 0x0924
+
+/* clock meters addr */
+#define CM_CTRL 0x0a00
+#define CM_I2S_CTRL 0x0a04
+#define CM_SPDIF_CTRL 0x0a08
+#define CM_VID_CTRL 0x0a0c
+#define CM_LANE_CTRL 0x0a10
+#define I2S_NM_STABLE 0x0a14
+#define I2S_NCTS_STABLE 0x0a18
+#define SPDIF_NM_STABLE 0x0a1c
+#define SPDIF_NCTS_STABLE 0x0a20
+#define NMVID_MEAS_STABLE 0x0a24
+#define I2S_MEAS 0x0a40
+#define SPDIF_MEAS 0x0a80
+#define NMVID_MEAS 0x0ac0
+
+/* source vif addr */
+#define BND_HSYNC2VSYNC 0x0b00
+#define HSYNC2VSYNC_F1_L1 0x0b04
+#define HSYNC2VSYNC_F2_L1 0x0b08
+#define HSYNC2VSYNC_STATUS 0x0b0c
+#define HSYNC2VSYNC_POL_CTRL 0x0b10
+
+/* dptx phy addr */
+#define DP_TX_PHY_CONFIG_REG 0x2000
+#define DP_TX_PHY_STATUS_REG 0x2004
+#define DP_TX_PHY_SW_RESET 0x2008
+#define DP_TX_PHY_SCRAMBLER_SEED 0x200c
+#define DP_TX_PHY_TRAINING_01_04 0x2010
+#define DP_TX_PHY_TRAINING_05_08 0x2014
+#define DP_TX_PHY_TRAINING_09_10 0x2018
+#define TEST_COR 0x23fc
+
+/* dptx hpd addr */
+#define HPD_IRQ_DET_MIN_TIMER 0x2100
+#define HPD_IRQ_DET_MAX_TIMER 0x2104
+#define HPD_UNPLGED_DET_MIN_TIMER 0x2108
+#define HPD_STABLE_TIMER 0x210c
+#define HPD_FILTER_TIMER 0x2110
+#define HPD_EVENT_MASK 0x211c
+#define HPD_EVENT_DET 0x2120
+
+/* dptx framer addr */
+#define DP_FRAMER_GLOBAL_CONFIG 0x2200
+#define DP_SW_RESET 0x2204
+#define DP_FRAMER_TU 0x2208
+#define DP_FRAMER_PXL_REPR 0x220c
+#define DP_FRAMER_SP 0x2210
+#define AUDIO_PACK_CONTROL 0x2214
+#define DP_VC_TABLE(x) (0x2218 + ((x) << 2))
+#define DP_VB_ID 0x2258
+#define DP_MTPH_LVP_CONTROL 0x225c
+#define DP_MTPH_SYMBOL_VALUES 0x2260
+#define DP_MTPH_ECF_CONTROL 0x2264
+#define DP_MTPH_ACT_CONTROL 0x2268
+#define DP_MTPH_STATUS 0x226c
+#define DP_INTERRUPT_SOURCE 0x2270
+#define DP_INTERRUPT_MASK 0x2274
+#define DP_FRONT_BACK_PORCH 0x2278
+#define DP_BYTE_COUNT 0x227c
+
+/* dptx stream addr */
+#define MSA_HORIZONTAL_0 0x2280
+#define MSA_HORIZONTAL_1 0x2284
+#define MSA_VERTICAL_0 0x2288
+#define MSA_VERTICAL_1 0x228c
+#define MSA_MISC 0x2290
+#define STREAM_CONFIG 0x2294
+#define AUDIO_PACK_STATUS 0x2298
+#define VIF_STATUS 0x229c
+#define PCK_STUFF_STATUS_0 0x22a0
+#define PCK_STUFF_STATUS_1 0x22a4
+#define INFO_PACK_STATUS 0x22a8
+#define RATE_GOVERNOR_STATUS 0x22ac
+#define DP_HORIZONTAL 0x22b0
+#define DP_VERTICAL_0 0x22b4
+#define DP_VERTICAL_1 0x22b8
+#define DP_BLOCK_SDP 0x22bc
+
+/* dptx glbl addr */
+#define DPTX_LANE_EN 0x2300
+#define DPTX_ENHNCD 0x2304
+#define DPTX_INT_MASK 0x2308
+#define DPTX_INT_STATUS 0x230c
+
+/* dp aux addr */
+#define DP_AUX_HOST_CONTROL 0x2800
+#define DP_AUX_INTERRUPT_SOURCE 0x2804
+#define DP_AUX_INTERRUPT_MASK 0x2808
+#define DP_AUX_SWAP_INVERSION_CONTROL 0x280c
+#define DP_AUX_SEND_NACK_TRANSACTION 0x2810
+#define DP_AUX_CLEAR_RX 0x2814
+#define DP_AUX_CLEAR_TX 0x2818
+#define DP_AUX_TIMER_STOP 0x281c
+#define DP_AUX_TIMER_CLEAR 0x2820
+#define DP_AUX_RESET_SW 0x2824
+#define DP_AUX_DIVIDE_2M 0x2828
+#define DP_AUX_TX_PREACHARGE_LENGTH 0x282c
+#define DP_AUX_FREQUENCY_1M_MAX 0x2830
+#define DP_AUX_FREQUENCY_1M_MIN 0x2834
+#define DP_AUX_RX_PRE_MIN 0x2838
+#define DP_AUX_RX_PRE_MAX 0x283c
+#define DP_AUX_TIMER_PRESET 0x2840
+#define DP_AUX_NACK_FORMAT 0x2844
+#define DP_AUX_TX_DATA 0x2848
+#define DP_AUX_RX_DATA 0x284c
+#define DP_AUX_TX_STATUS 0x2850
+#define DP_AUX_RX_STATUS 0x2854
+#define DP_AUX_RX_CYCLE_COUNTER 0x2858
+#define DP_AUX_MAIN_STATES 0x285c
+#define DP_AUX_MAIN_TIMER 0x2860
+#define DP_AUX_AFE_OUT 0x2864
+
+/* crypto addr */
+#define CRYPTO_HDCP_REVISION 0x5800
+#define HDCP_CRYPTO_CONFIG 0x5804
+#define CRYPTO_INTERRUPT_SOURCE 0x5808
+#define CRYPTO_INTERRUPT_MASK 0x580c
+#define CRYPTO22_CONFIG 0x5818
+#define CRYPTO22_STATUS 0x581c
+#define SHA_256_DATA_IN 0x583c
+#define SHA_256_DATA_OUT_(x) (0x5850 + ((x) << 2))
+#define AES_32_KEY_(x) (0x5870 + ((x) << 2))
+#define AES_32_DATA_IN 0x5880
+#define AES_32_DATA_OUT_(x) (0x5884 + ((x) << 2))
+#define CRYPTO14_CONFIG 0x58a0
+#define CRYPTO14_STATUS 0x58a4
+#define CRYPTO14_PRNM_OUT 0x58a8
+#define CRYPTO14_KM_0 0x58ac
+#define CRYPTO14_KM_1 0x58b0
+#define CRYPTO14_AN_0 0x58b4
+#define CRYPTO14_AN_1 0x58b8
+#define CRYPTO14_YOUR_KSV_0 0x58bc
+#define CRYPTO14_YOUR_KSV_1 0x58c0
+#define CRYPTO14_MI_0 0x58c4
+#define CRYPTO14_MI_1 0x58c8
+#define CRYPTO14_TI_0 0x58cc
+#define CRYPTO14_KI_0 0x58d0
+#define CRYPTO14_KI_1 0x58d4
+#define CRYPTO14_BLOCKS_NUM 0x58d8
+#define CRYPTO14_KEY_MEM_DATA_0 0x58dc
+#define CRYPTO14_KEY_MEM_DATA_1 0x58e0
+#define CRYPTO14_SHA1_MSG_DATA 0x58e4
+#define CRYPTO14_SHA1_V_VALUE_(x) (0x58e8 + ((x) << 2))
+#define TRNG_CTRL 0x58fc
+#define TRNG_DATA_RDY 0x5900
+#define TRNG_DATA 0x5904
+
+/* cipher addr */
+#define HDCP_REVISION 0x60000
+#define INTERRUPT_SOURCE 0x60004
+#define INTERRUPT_MASK 0x60008
+#define HDCP_CIPHER_CONFIG 0x6000c
+#define AES_128_KEY_0 0x60010
+#define AES_128_KEY_1 0x60014
+#define AES_128_KEY_2 0x60018
+#define AES_128_KEY_3 0x6001c
+#define AES_128_RANDOM_0 0x60020
+#define AES_128_RANDOM_1 0x60024
+#define CIPHER14_KM_0 0x60028
+#define CIPHER14_KM_1 0x6002c
+#define CIPHER14_STATUS 0x60030
+#define CIPHER14_RI_PJ_STATUS 0x60034
+#define CIPHER_MODE 0x60038
+#define CIPHER14_AN_0 0x6003c
+#define CIPHER14_AN_1 0x60040
+#define CIPHER22_AUTH 0x60044
+#define CIPHER14_R0_DP_STATUS 0x60048
+#define CIPHER14_BOOTSTRAP 0x6004c
+
+#define DPTX_FRMR_DATA_CLK_RSTN_EN BIT(11)
+#define DPTX_FRMR_DATA_CLK_EN BIT(10)
+#define DPTX_PHY_DATA_RSTN_EN BIT(9)
+#define DPTX_PHY_DATA_CLK_EN BIT(8)
+#define DPTX_PHY_CHAR_RSTN_EN BIT(7)
+#define DPTX_PHY_CHAR_CLK_EN BIT(6)
+#define SOURCE_AUX_SYS_CLK_RSTN_EN BIT(5)
+#define SOURCE_AUX_SYS_CLK_EN BIT(4)
+#define DPTX_SYS_CLK_RSTN_EN BIT(3)
+#define DPTX_SYS_CLK_EN BIT(2)
+#define CFG_DPTX_VIF_CLK_RSTN_EN BIT(1)
+#define CFG_DPTX_VIF_CLK_EN BIT(0)
+
+#define SOURCE_PHY_RSTN_EN BIT(1)
+#define SOURCE_PHY_CLK_EN BIT(0)
+
+#define SOURCE_PKT_SYS_RSTN_EN BIT(3)
+#define SOURCE_PKT_SYS_CLK_EN BIT(2)
+#define SOURCE_PKT_DATA_RSTN_EN BIT(1)
+#define SOURCE_PKT_DATA_CLK_EN BIT(0)
+
+#define SPDIF_CDR_CLK_RSTN_EN BIT(5)
+#define SPDIF_CDR_CLK_EN BIT(4)
+#define SOURCE_AIF_SYS_RSTN_EN BIT(3)
+#define SOURCE_AIF_SYS_CLK_EN BIT(2)
+#define SOURCE_AIF_CLK_RSTN_EN BIT(1)
+#define SOURCE_AIF_CLK_EN BIT(0)
+
+#define SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN BIT(3)
+#define SOURCE_CIPHER_SYS_CLK_EN BIT(2)
+#define SOURCE_CIPHER_CHAR_CLK_RSTN_EN BIT(1)
+#define SOURCE_CIPHER_CHAR_CLK_EN BIT(0)
+
+#define SOURCE_CRYPTO_SYS_CLK_RSTN_EN BIT(1)
+#define SOURCE_CRYPTO_SYS_CLK_EN BIT(0)
+
+#define APB_IRAM_PATH BIT(2)
+#define APB_DRAM_PATH BIT(1)
+#define APB_XT_RESET BIT(0)
+
+#define MAILBOX_INT_MASK_BIT BIT(1)
+#define PIF_INT_MASK_BIT BIT(0)
+#define ALL_INT_MASK 3
+
+/* mailbox */
+#define MB_OPCODE_ID 0
+#define MB_MODULE_ID 1
+#define MB_SIZE_MSB_ID 2
+#define MB_SIZE_LSB_ID 3
+#define MB_DATA_ID 4
+
+#define MB_MODULE_ID_DP_TX 0x01
+#define MB_MODULE_ID_HDCP_TX 0x07
+#define MB_MODULE_ID_HDCP_RX 0x08
+#define MB_MODULE_ID_HDCP_GENERAL 0x09
+#define MB_MODULE_ID_GENERAL 0x0a
+
+/* general opcode */
+#define GENERAL_MAIN_CONTROL 0x01
+#define GENERAL_TEST_ECHO 0x02
+#define GENERAL_BUS_SETTINGS 0x03
+#define GENERAL_TEST_ACCESS 0x04
+
+#define DPTX_SET_POWER_MNG 0x00
+#define DPTX_SET_HOST_CAPABILITIES 0x01
+#define DPTX_GET_EDID 0x02
+#define DPTX_READ_DPCD 0x03
+#define DPTX_WRITE_DPCD 0x04
+#define DPTX_ENABLE_EVENT 0x05
+#define DPTX_WRITE_REGISTER 0x06
+#define DPTX_READ_REGISTER 0x07
+#define DPTX_WRITE_FIELD 0x08
+#define DPTX_TRAINING_CONTROL 0x09
+#define DPTX_READ_EVENT 0x0a
+#define DPTX_READ_LINK_STAT 0x0b
+#define DPTX_SET_VIDEO 0x0c
+#define DPTX_SET_AUDIO 0x0d
+#define DPTX_GET_LAST_AUX_STAUS 0x0e
+#define DPTX_SET_LINK_BREAK_POINT 0x0f
+#define DPTX_FORCE_LANES 0x10
+#define DPTX_HPD_STATE 0x11
+
+#define FW_STANDBY 0
+#define FW_ACTIVE 1
+
+#define DPTX_EVENT_ENABLE_HPD BIT(0)
+#define DPTX_EVENT_ENABLE_TRAINING BIT(1)
+
+#define LINK_TRAINING_NOT_ACTIVE 0
+#define LINK_TRAINING_RUN 1
+#define LINK_TRAINING_RESTART 2
+
+#define CONTROL_VIDEO_IDLE 0
+#define CONTROL_VIDEO_VALID 1
+
+#define TU_CNT_RST_EN BIT(15)
+#define VIF_BYPASS_INTERLACE BIT(13)
+#define INTERLACE_FMT_DET BIT(12)
+#define INTERLACE_DTCT_WIN 0x20
+
+#define DP_FRAMER_SP_INTERLACE_EN BIT(2)
+#define DP_FRAMER_SP_HSP BIT(1)
+#define DP_FRAMER_SP_VSP BIT(0)
+
+/* capability */
+#define AUX_HOST_INVERT 3
+#define FAST_LT_SUPPORT 1
+#define FAST_LT_NOT_SUPPORT 0
+#define LANE_MAPPING_NORMAL 0x1b
+#define LANE_MAPPING_FLIPPED 0xe4
+#define ENHANCED 1
+#define SCRAMBLER_EN BIT(4)
+
+#define FULL_LT_STARTED BIT(0)
+#define FASE_LT_STARTED BIT(1)
+#define CLK_RECOVERY_FINISHED BIT(2)
+#define EQ_PHASE_FINISHED BIT(3)
+#define FASE_LT_START_FINISHED BIT(4)
+#define CLK_RECOVERY_FAILED BIT(5)
+#define EQ_PHASE_FAILED BIT(6)
+#define FASE_LT_FAILED BIT(7)
+
+#define DPTX_HPD_EVENT BIT(0)
+#define DPTX_TRAINING_EVENT BIT(1)
+#define HDCP_TX_STATUS_EVENT BIT(4)
+#define HDCP2_TX_IS_KM_STORED_EVENT BIT(5)
+#define HDCP2_TX_STORE_KM_EVENT BIT(6)
+#define HDCP_TX_IS_RECEIVER_ID_VALID_EVENT BIT(7)
+
+#define TU_SIZE 30
+#define CDN_DP_MAX_LINK_RATE DP_LINK_BW_5_4
+
+/* audio */
+#define AUDIO_PACK_EN BIT(8)
+#define SAMPLING_FREQ(x) (((x) & 0xf) << 16)
+#define ORIGINAL_SAMP_FREQ(x) (((x) & 0xf) << 24)
+#define SYNC_WR_TO_CH_ZERO BIT(1)
+#define I2S_DEC_START BIT(1)
+#define AUDIO_SW_RST BIT(0)
+#define SMPL2PKT_EN BIT(1)
+#define MAX_NUM_CH(x) (((x) & 0x1f) - 1)
+#define NUM_OF_I2S_PORTS(x) ((((x) / 2 - 1) & 0x3) << 5)
+#define AUDIO_TYPE_LPCM (2 << 7)
+#define CFG_SUB_PCKT_NUM(x) ((((x) - 1) & 0x7) << 11)
+#define AUDIO_CH_NUM(x) ((((x) - 1) & 0x1f) << 2)
+#define TRANS_SMPL_WIDTH_16 0
+#define TRANS_SMPL_WIDTH_24 BIT(11)
+#define TRANS_SMPL_WIDTH_32 (2 << 11)
+#define I2S_DEC_PORT_EN(x) (((x) & 0xf) << 17)
+#define SPDIF_ENABLE BIT(21)
+#define SPDIF_AVG_SEL BIT(20)
+#define SPDIF_JITTER_BYPASS BIT(19)
+#define SPDIF_FIFO_MID_RANGE(x) (((x) & 0xff) << 11)
+#define SPDIF_JITTER_THRSH(x) (((x) & 0xff) << 3)
+#define SPDIF_JITTER_AVG_WIN(x) ((x) & 0x7)
+
+/* Reference cycles when using lane clock as reference */
+#define LANE_REF_CYC 0x8000
+
+enum voltage_swing_level {
+ VOLTAGE_LEVEL_0,
+ VOLTAGE_LEVEL_1,
+ VOLTAGE_LEVEL_2,
+ VOLTAGE_LEVEL_3,
+};
+
+enum pre_emphasis_level {
+ PRE_EMPHASIS_LEVEL_0,
+ PRE_EMPHASIS_LEVEL_1,
+ PRE_EMPHASIS_LEVEL_2,
+ PRE_EMPHASIS_LEVEL_3,
+};
+
+enum pattern_set {
+ PTS1 = BIT(0),
+ PTS2 = BIT(1),
+ PTS3 = BIT(2),
+ PTS4 = BIT(3),
+ DP_NONE = BIT(4)
+};
+
+enum vic_color_depth {
+ BCS_6 = 0x1,
+ BCS_8 = 0x2,
+ BCS_10 = 0x4,
+ BCS_12 = 0x8,
+ BCS_16 = 0x10,
+};
+
+enum vic_bt_type {
+ BT_601 = 0x0,
+ BT_709 = 0x1,
+};
+
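+/*
+ * Rough call order, as driven by cdn-dp-core.c (error handling elided):
+ * cdn_dp_clock_reset() -> cdn_dp_set_fw_clk() -> cdn_dp_load_firmware() ->
+ * cdn_dp_set_firmware_active() -> cdn_dp_event_config(); then, per port
+ * and hotplug event: cdn_dp_set_host_cap(), cdn_dp_get_hpd_status(),
+ * cdn_dp_get_edid_block(), cdn_dp_train_link(), cdn_dp_config_video()
+ * and cdn_dp_set_video_status().
+ */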
+void cdn_dp_clock_reset(struct cdn_dp_device *dp);
+
+void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, u32 clk);
+int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
+ u32 i_size, const u32 *d_mem, u32 d_size);
+int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable);
+int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip);
+int cdn_dp_event_config(struct cdn_dp_device *dp);
+u32 cdn_dp_get_event(struct cdn_dp_device *dp);
+int cdn_dp_get_hpd_status(struct cdn_dp_device *dp);
+int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value);
+int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len);
+int cdn_dp_get_edid_block(void *dp, u8 *edid,
+ unsigned int block, size_t length);
+int cdn_dp_train_link(struct cdn_dp_device *dp);
+int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active);
+int cdn_dp_config_video(struct cdn_dp_device *dp);
+int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio);
+int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable);
+int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio);
+#endif /* _CDN_DP_REG_H */
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 0665fb915579..a6d4a0236e8f 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -257,8 +257,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct drm_encoder *encoder;
struct rockchip_hdmi *hdmi;
- struct resource *iores;
- int irq;
int ret;
if (!pdev->dev.of_node)
@@ -273,14 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENXIO;
-
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
@@ -301,7 +291,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+ ret = dw_hdmi_bind(pdev, encoder, plat_data);
/*
* If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
@@ -316,7 +306,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
void *data)
{
- return dw_hdmi_unbind(dev, master, data);
+ return dw_hdmi_unbind(dev);
}
static const struct component_ops dw_hdmi_rockchip_ops = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 2390c8577617..b360e6251836 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -14,19 +14,19 @@
* GNU General Public License for more details.
*/
-#include <asm/dma-iommu.h>
-
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-iommu.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/component.h>
#include <linux/console.h>
+#include <linux/iommu.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
@@ -50,28 +50,31 @@ static struct drm_driver rockchip_drm_driver;
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
struct device *dev)
{
- struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
+ struct rockchip_drm_private *private = drm_dev->dev_private;
int ret;
if (!is_support_iommu)
return 0;
- ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
- if (ret)
+ ret = iommu_attach_device(private->domain, dev);
+ if (ret) {
+ dev_err(dev, "Failed to attach iommu device\n");
return ret;
+ }
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-
- return arm_iommu_attach_device(dev, mapping);
+ return 0;
}
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
struct device *dev)
{
+ struct rockchip_drm_private *private = drm_dev->dev_private;
+ struct iommu_domain *domain = private->domain;
+
if (!is_support_iommu)
return;
- arm_iommu_detach_device(dev);
+ iommu_detach_device(domain, dev);
}
int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
@@ -99,24 +102,11 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
priv->crtc_funcs[pipe] = NULL;
}
-static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
- int pipe)
-{
- struct drm_crtc *crtc;
- int i = 0;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- if (i++ == pipe)
- return crtc;
-
- return NULL;
-}
-
static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev,
unsigned int pipe)
{
struct rockchip_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
if (crtc && priv->crtc_funcs[pipe] &&
priv->crtc_funcs[pipe]->enable_vblank)
@@ -129,18 +119,53 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
unsigned int pipe)
{
struct rockchip_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
if (crtc && priv->crtc_funcs[pipe] &&
priv->crtc_funcs[pipe]->enable_vblank)
priv->crtc_funcs[pipe]->disable_vblank(crtc);
}
+static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
+{
+ struct rockchip_drm_private *private = drm_dev->dev_private;
+ struct iommu_domain_geometry *geometry;
+ u64 start, end;
+
+ if (!is_support_iommu)
+ return 0;
+
+ private->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!private->domain)
+ return -ENOMEM;
+
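+ /* the domain's aperture is the IOVA range drm_mm will hand out */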
+ geometry = &private->domain->geometry;
+ start = geometry->aperture_start;
+ end = geometry->aperture_end;
+
+ DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
+ start, end);
+ drm_mm_init(&private->mm, start, end - start + 1);
+ mutex_init(&private->mm_lock);
+
+ return 0;
+}
+
+static void rockchip_iommu_cleanup(struct drm_device *drm_dev)
+{
+ struct rockchip_drm_private *private = drm_dev->dev_private;
+
+ if (!is_support_iommu)
+ return;
+
+ drm_mm_takedown(&private->mm);
+ iommu_domain_free(private->domain);
+}
+
static int rockchip_drm_bind(struct device *dev)
{
struct drm_device *drm_dev;
struct rockchip_drm_private *private;
- struct dma_iommu_mapping *mapping = NULL;
int ret;
drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
@@ -164,38 +189,14 @@ static int rockchip_drm_bind(struct device *dev)
rockchip_drm_mode_config_init(drm_dev);
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
- GFP_KERNEL);
- if (!dev->dma_parms) {
- ret = -ENOMEM;
+ ret = rockchip_drm_init_iommu(drm_dev);
+ if (ret)
goto err_config_cleanup;
- }
-
- if (is_support_iommu) {
- /* TODO(djkurtz): fetch the mapping start/size from somewhere */
- mapping = arm_iommu_create_mapping(&platform_bus_type,
- 0x00000000,
- SZ_2G);
- if (IS_ERR(mapping)) {
- ret = PTR_ERR(mapping);
- goto err_config_cleanup;
- }
-
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- goto err_release_mapping;
-
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-
- ret = arm_iommu_attach_device(dev, mapping);
- if (ret)
- goto err_release_mapping;
- }
/* Try to bind all sub drivers. */
ret = component_bind_all(dev, drm_dev);
if (ret)
- goto err_detach_device;
+ goto err_iommu_cleanup;
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
@@ -220,8 +221,6 @@ static int rockchip_drm_bind(struct device *dev)
if (ret)
goto err_fbdev_fini;
- if (is_support_iommu)
- arm_iommu_release_mapping(mapping);
return 0;
err_fbdev_fini:
rockchip_drm_fbdev_fini(drm_dev);
@@ -230,12 +229,8 @@ err_vblank_cleanup:
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
component_unbind_all(dev, drm_dev);
-err_detach_device:
- if (is_support_iommu)
- arm_iommu_detach_device(dev);
-err_release_mapping:
- if (is_support_iommu)
- arm_iommu_release_mapping(mapping);
+err_iommu_cleanup:
+ rockchip_iommu_cleanup(drm_dev);
err_config_cleanup:
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
@@ -252,8 +247,7 @@ static void rockchip_drm_unbind(struct device *dev)
drm_vblank_cleanup(drm_dev);
drm_kms_helper_poll_fini(drm_dev);
component_unbind_all(dev, drm_dev);
- if (is_support_iommu)
- arm_iommu_detach_device(dev);
+ rockchip_iommu_cleanup(drm_dev);
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
drm_dev_unregister(drm_dev);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index fb6226cf84b7..adc39302bec5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -30,6 +30,7 @@
struct drm_device;
struct drm_connector;
+struct iommu_domain;
/*
* Rockchip drm private crtc funcs.
@@ -60,7 +61,10 @@ struct rockchip_drm_private {
struct drm_gem_object *fbdev_bo;
const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
struct drm_atomic_state *state;
-
+ struct iommu_domain *domain;
+ /* protects drm_mm against concurrent access */
+ struct mutex mm_lock;
+ struct drm_mm mm;
struct list_head psr_list;
spinlock_t psr_list_lock;
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 0f6eda023bd0..c9ccdf8f44bb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -92,7 +92,7 @@ rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cm
if (!rockchip_fb)
return ERR_PTR(-ENOMEM);
- drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd);
for (i = 0; i < num_planes; i++)
rockchip_fb->obj[i] = obj[i];
@@ -213,7 +213,7 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
if (IS_ERR(rockchip_fb))
- return NULL;
+ return ERR_CAST(rockchip_fb);
return &rockchip_fb->fb;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 8f639c8597a5..70ad50dd594d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -94,7 +94,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
fbi->fbops = &rockchip_drm_fbdev_ops;
fb = helper->fb;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
offset = fbi->var.xoffset * bytes_per_pixel;
@@ -106,7 +106,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
fbi->fix.smem_len = rk_obj->base.size;
DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
- fb->width, fb->height, fb->depth, rk_obj->kvaddr,
+ fb->width, fb->height, fb->format->depth,
+ rk_obj->kvaddr,
offset, size);
fbi->skip_vt_switch = true;
@@ -128,19 +129,16 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
{
struct rockchip_drm_private *private = dev->dev_private;
struct drm_fb_helper *helper;
- unsigned int num_crtc;
int ret;
if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
return -EINVAL;
- num_crtc = dev->mode_config.num_crtc;
-
helper = &private->fbdev_helper;
drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
if (ret < 0) {
dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index b70f9423379c..df9e57064f19 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -16,11 +16,146 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>
+#include <linux/iommu.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
-static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
+static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_device *drm = rk_obj->base.dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ ssize_t ret;
+
+ mutex_lock(&private->mm_lock);
+
+ ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
+ rk_obj->base.size, PAGE_SIZE,
+ 0, 0);
+
+ mutex_unlock(&private->mm_lock);
+ if (ret < 0) {
+ DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
+ return ret;
+ }
+
+ rk_obj->dma_addr = rk_obj->mm.start;
+
+ ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
+ rk_obj->sgt->nents, prot);
+ if (ret < rk_obj->base.size) {
+ DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
+ ret, rk_obj->base.size);
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ rk_obj->size = ret;
+
+ return 0;
+
+err_remove_node:
+ drm_mm_remove_node(&rk_obj->mm);
+
+ return ret;
+}
+
+static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_device *drm = rk_obj->base.dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+
+ iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);
+
+ mutex_lock(&private->mm_lock);
+
+ drm_mm_remove_node(&rk_obj->mm);
+
+ mutex_unlock(&private->mm_lock);
+
+ return 0;
+}
+
+static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_device *drm = rk_obj->base.dev;
+ int ret, i;
+ struct scatterlist *s;
+
+ rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
+ if (IS_ERR(rk_obj->pages))
+ return PTR_ERR(rk_obj->pages);
+
+ rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
+
+ rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+ if (IS_ERR(rk_obj->sgt)) {
+ ret = PTR_ERR(rk_obj->sgt);
+ goto err_put_pages;
+ }
+
+ /*
+ * Fake up the SG table so that dma_sync_sg_for_device() can be used
+ * to flush the pages associated with it.
+ *
+ * TODO: Replace this by drm_clflush_sg() once it can be implemented
+ * without relying on symbols that are not exported.
+ */
+ for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
+ sg_dma_address(s) = sg_phys(s);
+
+ dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
+ DMA_TO_DEVICE);
+
+ return 0;
+
+err_put_pages:
+ drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
+ return ret;
+}
+
+static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
+{
+ sg_free_table(rk_obj->sgt);
+ kfree(rk_obj->sgt);
+ drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
+}
+
+static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
+ bool alloc_kmap)
+{
+ int ret;
+
+ ret = rockchip_gem_get_pages(rk_obj);
+ if (ret < 0)
+ return ret;
+
+ ret = rockchip_gem_iommu_map(rk_obj);
+ if (ret < 0)
+ goto err_free;
+
+ if (alloc_kmap) {
+ rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!rk_obj->kvaddr) {
+ DRM_ERROR("failed to vmap() buffer\n");
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+ }
+
+ return 0;
+
+err_unmap:
+ rockchip_gem_iommu_unmap(rk_obj);
+err_free:
+ rockchip_gem_put_pages(rk_obj);
+
+ return ret;
+}
+
+static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
bool alloc_kmap)
{
struct drm_gem_object *obj = &rk_obj->base;
@@ -42,7 +177,27 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
return 0;
}
-static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
+ bool alloc_kmap)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+
+ if (private->domain)
+ return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
+ else
+ return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
+}
+
+static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
+{
+ vunmap(rk_obj->kvaddr);
+ rockchip_gem_iommu_unmap(rk_obj);
+ rockchip_gem_put_pages(rk_obj);
+}
+
+static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
struct drm_gem_object *obj = &rk_obj->base;
struct drm_device *drm = obj->dev;
@@ -51,23 +206,68 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
rk_obj->dma_attrs);
}
-static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+{
+ if (rk_obj->pages)
+ rockchip_gem_free_iommu(rk_obj);
+ else
+ rockchip_gem_free_dma(rk_obj);
+}
+static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ unsigned int i, count = obj->size >> PAGE_SHIFT;
+ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long uaddr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff;
+ unsigned long end = user_count + offset;
int ret;
+
+ if (user_count == 0)
+ return -ENXIO;
+ if (end > count)
+ return -ENXIO;
+
+ for (i = offset; i < end; i++) {
+ ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
+ if (ret)
+ return ret;
+ uaddr += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
struct drm_device *drm = obj->dev;
+ return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ obj->size, rk_obj->dma_attrs);
+}
+
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
/*
- * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
+ * Both allocation paths back rk_obj with struct pages, so clear the
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
- obj->size, rk_obj->dma_attrs);
+ if (rk_obj->pages)
+ ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
+ else
+ ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
+
if (ret)
drm_gem_vm_close(vma);
@@ -101,6 +301,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return rockchip_drm_gem_object_mmap(obj, vma);
}
+static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
+{
+ drm_gem_object_release(&rk_obj->base);
+ kfree(rk_obj);
+}
+
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
bool alloc_kmap)
@@ -117,7 +323,7 @@ struct rockchip_gem_object *
obj = &rk_obj->base;
- drm_gem_private_object_init(drm, obj, size);
+ drm_gem_object_init(drm, obj, size);
ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
if (ret)
@@ -126,7 +332,7 @@ struct rockchip_gem_object *
return rk_obj;
err_free_rk_obj:
- kfree(rk_obj);
+ rockchip_gem_release_object(rk_obj);
return ERR_PTR(ret);
}
@@ -138,13 +344,11 @@ void rockchip_gem_free_object(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj;
- drm_gem_free_mmap_offset(obj);
-
rk_obj = to_rockchip_obj(obj);
rockchip_gem_free_buf(rk_obj);
- kfree(rk_obj);
+ rockchip_gem_release_object(rk_obj);
}
/*
@@ -253,6 +457,9 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct sg_table *sgt;
int ret;
+ if (rk_obj->pages)
+ return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
@@ -273,6 +480,10 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ if (rk_obj->pages)
+ return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+
if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return NULL;
@@ -281,5 +492,12 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- /* Nothing to do */
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
+ if (rk_obj->pages) {
+ vunmap(vaddr);
+ return;
+ }
+
+ /* Nothing to do if allocated by DMA mapping API. */
}
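
For orientation, a hedged userspace sketch of how these mmap paths are reached through the dumb-buffer flow (error handling trimmed; fd is assumed to be an open DRM device node):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

static void *map_dumb(int fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb arg = { .handle = handle };

	/* Ask the driver for a fake mmap offset for this GEM object. */
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &arg))
		return MAP_FAILED;

	/* This mmap() ends up in rockchip_gem_mmap() above. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}

The fake offset is what lets drm_gem_mmap() look up the GEM object before handing the VMA to the driver hook, which then picks the IOMMU or DMA path based on rk_obj->pages.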
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 18b3488db4ec..3f6ea4d18a5c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -23,7 +23,15 @@ struct rockchip_gem_object {
void *kvaddr;
dma_addr_t dma_addr;
+ /* Used when IOMMU is disabled */
unsigned long dma_attrs;
+
+ /* Used when IOMMU is enabled */
+ struct drm_mm_node mm;
+ unsigned long num_pages;
+ struct page **pages;
+ struct sg_table *sgt;
+ size_t size;
};
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7eba305c488..76c79ac57df0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -531,6 +531,8 @@ static int vop_enable(struct drm_crtc *crtc)
}
memcpy(vop->regs, vop->regsbak, vop->len);
+ vop_cfg_done(vop);
+
/*
* At this point the vop clock and iommu are enabled, so reading and
* writing vop regs is safe.
*/
@@ -582,6 +584,8 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
spin_unlock(&vop->reg_lock);
}
+ vop_cfg_done(vop);
+
drm_crtc_vblank_off(crtc);
/*
@@ -668,7 +672,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
if (!state->visible)
return 0;
- ret = vop_convert_format(fb->pixel_format);
+ ret = vop_convert_format(fb->format->format);
if (ret < 0)
return ret;
@@ -676,7 +680,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd after clipping, but the start point of a yuv
* plane must be aligned to 2 pixels.
*/
- if (is_yuv_support(fb->pixel_format) && ((state->src.x1 >> 16) % 2))
+ if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
return -EINVAL;
return 0;
@@ -749,21 +753,21 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);
- offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
+ offset = (src->x1 >> 16) * fb->format->cpp[0];
offset += (src->y1 >> 16) * fb->pitches[0];
dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
- format = vop_convert_format(fb->pixel_format);
+ format = vop_convert_format(fb->format->format);
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
- if (is_yuv_support(fb->pixel_format)) {
- int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
- int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
- int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+ if (is_yuv_support(fb->format->format)) {
+ int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
+ int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
+ int bpp = fb->format->cpp[1];
uv_obj = rockchip_fb_get_gem_obj(fb, 1);
rk_uv_obj = to_rockchip_obj(uv_obj);
@@ -779,16 +783,16 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
if (win->phy->scl)
scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
drm_rect_width(dest), drm_rect_height(dest),
- fb->pixel_format);
+ fb->format->format);
VOP_WIN_SET(vop, win, act_info, act_info);
VOP_WIN_SET(vop, win, dsp_info, dsp_info);
VOP_WIN_SET(vop, win, dsp_st, dsp_st);
- rb_swap = has_rb_swapped(fb->pixel_format);
+ rb_swap = has_rb_swapped(fb->format->format);
VOP_WIN_SET(vop, win, rb_swap, rb_swap);
- if (is_alpha_support(fb->pixel_format)) {
+ if (is_alpha_support(fb->format->format)) {
VOP_WIN_SET(vop, win, dst_alpha_ctl,
DST_FACTOR_M0(ALPHA_SRC_INVERSE));
val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
@@ -932,9 +936,11 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
vop_dsp_hold_valid_irq_disable(vop);
}
- pin_pol = 0x8;
- pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
- pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
+ pin_pol = BIT(DCLK_INVERT);
+ pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ?
+ 0 : BIT(HSYNC_POSITIVE);
+ pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ?
+ 0 : BIT(VSYNC_POSITIVE);
VOP_CTRL_SET(vop, pin_pol, pin_pol);
switch (s->output_type) {
@@ -954,6 +960,11 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
VOP_CTRL_SET(vop, mipi_en, 1);
break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ pin_pol &= ~BIT(DCLK_INVERT);
+ VOP_CTRL_SET(vop, dp_pin_pol, pin_pol);
+ VOP_CTRL_SET(vop, dp_en, 1);
+ break;
default:
DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
s->output_type);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 1dbc52615257..5a4faa85dbd2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -45,6 +45,7 @@ struct vop_ctrl {
struct vop_reg edp_en;
struct vop_reg hdmi_en;
struct vop_reg mipi_en;
+ struct vop_reg dp_en;
struct vop_reg out_mode;
struct vop_reg dither_down;
struct vop_reg dither_up;
@@ -53,6 +54,7 @@ struct vop_ctrl {
struct vop_reg hdmi_pin_pol;
struct vop_reg edp_pin_pol;
struct vop_reg mipi_pin_pol;
+ struct vop_reg dp_pin_pol;
struct vop_reg htotal_pw;
struct vop_reg hact_st_end;
@@ -244,6 +246,13 @@ enum scale_down_mode {
SCALE_DOWN_AVG = 0x1
};
+enum vop_pol {
+ HSYNC_POSITIVE = 0,
+ VSYNC_POSITIVE = 1,
+ DEN_NEGATIVE = 2,
+ DCLK_INVERT = 3
+};
+
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
#define SCL_FT_DEFAULT_FIXPOINT_SHIFT 12
#define SCL_MAX_VSKIPLINES 4
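
For reference, a standalone sketch of the polarity computation these names serve; it mirrors the vop_crtc_enable() hunk above and is illustration only, not part of the patch:

#include <linux/bitops.h>
#include <linux/types.h>
#include <drm/drm_modes.h>

static u32 sketch_pin_pol(const struct drm_display_mode *mode)
{
	u32 pin_pol = BIT(DCLK_INVERT);

	if (!(mode->flags & DRM_MODE_FLAG_NHSYNC))
		pin_pol |= BIT(HSYNC_POSITIVE);
	if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
		pin_pol |= BIT(VSYNC_POSITIVE);

	/* The DisplayPort case above additionally clears DCLK_INVERT. */
	return pin_pol;
}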
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 35c51f3402f2..91fbc7b52147 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -284,6 +284,7 @@ static const struct vop_data rk3288_vop = {
static const struct vop_ctrl rk3399_ctrl_data = {
.standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
+ .dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
.rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
@@ -293,6 +294,7 @@ static const struct vop_ctrl rk3399_ctrl_data = {
.data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
.out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
.rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
+ .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
.hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20),
.edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24),
.mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28),
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index d47dff95fe52..2a5b8466d806 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -655,13 +655,11 @@ void savage_driver_lastclose(struct drm_device *dev)
}
}
-int savage_driver_unload(struct drm_device *dev)
+void savage_driver_unload(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
kfree(dev_priv);
-
- return 0;
}
static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index 37b699571ad0..44a1009b6ecb 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -210,7 +210,7 @@ extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
extern int savage_driver_firstopen(struct drm_device *dev);
extern void savage_driver_lastclose(struct drm_device *dev);
-extern int savage_driver_unload(struct drm_device *dev);
+extern void savage_driver_unload(struct drm_device *dev);
extern void savage_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
new file mode 100644
index 000000000000..4aebfc7f27d4
--- /dev/null
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += test-drm_mm.o
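
The selftest module only exists when CONFIG_DRM_DEBUG_MM_SELFTEST is set (that Kconfig hunk lies outside this excerpt). Loading test-drm_mm.ko runs every test in declaration order, and the generated igt__<line>__<name> module parameters (see drm_selftest.c below) can be used to select individual tests.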
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
new file mode 100644
index 000000000000..37bbdac52896
--- /dev/null
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -0,0 +1,24 @@
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used both as an enum value and, expanded as igt__name,
+ * to create a module parameter. It must be unique and a legal C
+ * identifier.
+ *
+ * Tests are executed in order by igt/drm_mm
+ */
+selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */
+selftest(init, igt_init)
+selftest(debug, igt_debug)
+selftest(reserve, igt_reserve)
+selftest(insert, igt_insert)
+selftest(replace, igt_replace)
+selftest(insert_range, igt_insert_range)
+selftest(align, igt_align)
+selftest(align32, igt_align32)
+selftest(align64, igt_align64)
+selftest(evict, igt_evict)
+selftest(evict_range, igt_evict_range)
+selftest(bottomup, igt_bottomup)
+selftest(topdown, igt_topdown)
+selftest(color, igt_color)
+selftest(color_evict, igt_color_evict)
+selftest(color_evict_range, igt_color_evict_range)
diff --git a/drivers/gpu/drm/selftests/drm_selftest.c b/drivers/gpu/drm/selftests/drm_selftest.c
new file mode 100644
index 000000000000..e29ed9faef5b
--- /dev/null
+++ b/drivers/gpu/drm/selftests/drm_selftest.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/compiler.h>
+
+#define selftest(name, func) __idx_##name,
+enum {
+#include TESTS
+};
+#undef selftest
+
+#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
+static struct drm_selftest {
+ bool enabled;
+ const char *name;
+ int (*func)(void *);
+} selftests[] = {
+#include TESTS
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
+#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n))
+#define selftest_0(n, func, id) \
+module_param_named(id, selftests[__idx_##n].enabled, bool, 0400);
+#define selftest(n, func) selftest_0(n, func, param(n))
+#include TESTS
+#undef selftest
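+
+/*
+ * Illustration: for a line such as selftest(insert, igt_insert) in
+ * the TESTS header, the three includes above expand to, roughly,
+ *
+ * enum { ..., __idx_insert, ... };
+ * [__idx_insert] = { .name = "insert", .func = igt_insert },
+ * module_param_named(igt__NNN__insert,
+ * selftests[__idx_insert].enabled, bool, 0400);
+ *
+ * where NNN is the __LINE__ of the selftest() line in the TESTS
+ * header, so the parameters keep declaration order.
+ */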
+
+static void set_default_test_all(struct drm_selftest *st, unsigned long count)
+{
+ unsigned long i;
+
+ for (i = 0; i < count; i++)
+ if (st[i].enabled)
+ return;
+
+ for (i = 0; i < count; i++)
+ st[i].enabled = true;
+}
+
+static int run_selftests(struct drm_selftest *st,
+ unsigned long count,
+ void *data)
+{
+ int err = 0;
+
+ set_default_test_all(st, count);
+
+ /* Tests are listed in natural order in drm_*_selftests.h */
+ for (; count--; st++) {
+ if (!st->enabled)
+ continue;
+
+ pr_debug("drm: Running %s\n", st->name);
+ err = st->func(data);
+ if (err)
+ break;
+ }
+
+ if (WARN(err > 0 || err == -ENOTTY,
+ "%s returned %d, conflicting with selftest's magic values!\n",
+ st->name, err))
+ err = -1;
+
+ rcu_barrier();
+ return err;
+}
+
+static int __maybe_unused
+__drm_subtests(const char *caller,
+ const struct drm_subtest *st,
+ int count,
+ void *data)
+{
+ int err;
+
+ for (; count--; st++) {
+ pr_debug("Running %s/%s\n", caller, st->name);
+ err = st->func(data);
+ if (err) {
+ pr_err("%s: %s failed with error %d\n",
+ caller, st->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/selftests/drm_selftest.h b/drivers/gpu/drm/selftests/drm_selftest.h
new file mode 100644
index 000000000000..c784ec02ff53
--- /dev/null
+++ b/drivers/gpu/drm/selftests/drm_selftest.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __DRM_SELFTEST_H__
+#define __DRM_SELFTEST_H__
+
+struct drm_subtest {
+ int (*func)(void *data);
+ const char *name;
+};
+
+static int __drm_subtests(const char *caller,
+ const struct drm_subtest *st,
+ int count,
+ void *data);
+#define drm_subtests(T, data) \
+ __drm_subtests(__func__, T, ARRAY_SIZE(T), data)
+
+#define SUBTEST(x) { x, #x }
+
+#endif /* __DRM_SELFTEST_H__ */
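
This header is meant to be #included into a selftest module together with drm_selftest.c (test-drm_mm.c below does exactly that). A hedged sketch of the resulting usage, with hypothetical check_foo()/check_bar() subtests:

#include <linux/kernel.h>	/* ARRAY_SIZE() for the drm_subtests macro */

static int check_foo(void *data) { return 0; }
static int check_bar(void *data) { return 0; }

static const struct drm_subtest my_subtests[] = {
	SUBTEST(check_foo),
	SUBTEST(check_bar),
};

static int igt_example(void *data)
{
	/* Expands to __drm_subtests(__func__, my_subtests, 2, data). */
	return drm_subtests(my_subtests, data);
}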
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
new file mode 100644
index 000000000000..1e71bc182ca9
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -0,0 +1,2276 @@
+/*
+ * Test cases for the drm_mm range manager
+ */
+
+#define pr_fmt(fmt) "drm_mm: " fmt
+
+#include <linux/module.h>
+#include <linux/prime_numbers.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_mm.h>
+
+#include "../lib/drm_random.h"
+
+#define TESTS "drm_mm_selftests.h"
+#include "drm_selftest.h"
+
+static unsigned int random_seed;
+static unsigned int max_iterations = 8192;
+static unsigned int max_prime = 128;
+
+enum {
+ BEST,
+ BOTTOMUP,
+ TOPDOWN,
+ EVICT,
+};
+
+static const struct insert_mode {
+ const char *name;
+ enum drm_mm_insert_mode mode;
+} insert_modes[] = {
+ [BEST] = { "best", DRM_MM_INSERT_BEST },
+ [BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
+ [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
+ [EVICT] = { "evict", DRM_MM_INSERT_EVICT },
+ {}
+}, evict_modes[] = {
+ { "bottom-up", DRM_MM_INSERT_LOW },
+ { "top-down", DRM_MM_INSERT_HIGH },
+ {}
+};
+
+static int igt_sanitycheck(void *ignored)
+{
+ pr_info("%s - ok!\n", __func__);
+ return 0;
+}
+
+static bool assert_no_holes(const struct drm_mm *mm)
+{
+ struct drm_mm_node *hole;
+ u64 hole_start, hole_end;
+ unsigned long count;
+
+ count = 0;
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
+ count++;
+ if (count) {
+ pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
+ return false;
+ }
+
+ drm_mm_for_each_node(hole, mm) {
+ if (drm_mm_hole_follows(hole)) {
+ pr_err("Hole follows node, expected none!\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
+{
+ struct drm_mm_node *hole;
+ u64 hole_start, hole_end;
+ unsigned long count;
+ bool ok = true;
+
+ if (end <= start)
+ return true;
+
+ count = 0;
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ if (start != hole_start || end != hole_end) {
+ if (ok)
+ pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
+ hole_start, hole_end,
+ start, end);
+ ok = false;
+ }
+ count++;
+ }
+ if (count != 1) {
+ pr_err("Expected to find one hole, found %lu instead\n", count);
+ ok = false;
+ }
+
+ return ok;
+}
+
+static bool assert_continuous(const struct drm_mm *mm, u64 size)
+{
+ struct drm_mm_node *node, *check, *found;
+ unsigned long n;
+ u64 addr;
+
+ if (!assert_no_holes(mm))
+ return false;
+
+ n = 0;
+ addr = 0;
+ drm_mm_for_each_node(node, mm) {
+ if (node->start != addr) {
+ pr_err("node[%ld] list out of order, expected %llx found %llx\n",
+ n, addr, node->start);
+ return false;
+ }
+
+ if (node->size != size) {
+ pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
+ n, size, node->size);
+ return false;
+ }
+
+ if (drm_mm_hole_follows(node)) {
+ pr_err("node[%ld] is followed by a hole!\n", n);
+ return false;
+ }
+
+ found = NULL;
+ drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
+ if (node != check) {
+ pr_err("lookup return wrong node, expected start %llx, found %llx\n",
+ node->start, check->start);
+ return false;
+ }
+ found = check;
+ }
+ if (!found) {
+ pr_err("lookup failed for node %llx + %llx\n",
+ addr, size);
+ return false;
+ }
+
+ addr += size;
+ n++;
+ }
+
+ return true;
+}
+
+static u64 misalignment(struct drm_mm_node *node, u64 alignment)
+{
+ u64 rem;
+
+ if (!alignment)
+ return 0;
+
+ div64_u64_rem(node->start, alignment, &rem);
+ return rem;
+}
+
+static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
+ u64 size, u64 alignment, unsigned long color)
+{
+ bool ok = true;
+
+ if (!drm_mm_node_allocated(node) || node->mm != mm) {
+ pr_err("node not allocated\n");
+ ok = false;
+ }
+
+ if (node->size != size) {
+ pr_err("node has wrong size, found %llu, expected %llu\n",
+ node->size, size);
+ ok = false;
+ }
+
+ if (misalignment(node, alignment)) {
+ pr_err("node is misalinged, start %llx rem %llu, expected alignment %llu\n",
+ node->start, misalignment(node, alignment), alignment);
+ ok = false;
+ }
+
+ if (node->color != color) {
+ pr_err("node has wrong color, found %lu, expected %lu\n",
+ node->color, color);
+ ok = false;
+ }
+
+ return ok;
+}
+
+#define show_mm(mm) do { \
+ struct drm_printer __p = drm_debug_printer(__func__); \
+ drm_mm_print((mm), &__p); } while (0)
+
+static int igt_init(void *ignored)
+{
+ const unsigned int size = 4096;
+ struct drm_mm mm;
+ struct drm_mm_node tmp;
+ int ret = -EINVAL;
+
+ /* Start with some simple checks on initialising the struct drm_mm */
+ memset(&mm, 0, sizeof(mm));
+ if (drm_mm_initialized(&mm)) {
+ pr_err("zeroed mm claims to be initialized\n");
+ return ret;
+ }
+
+ memset(&mm, 0xff, sizeof(mm));
+ drm_mm_init(&mm, 0, size);
+ if (!drm_mm_initialized(&mm)) {
+ pr_err("mm claims not to be initialized\n");
+ goto out;
+ }
+
+ if (!drm_mm_clean(&mm)) {
+ pr_err("mm not empty on creation\n");
+ goto out;
+ }
+
+ /* After creation, it should all be one massive hole */
+ if (!assert_one_hole(&mm, 0, size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.start = 0;
+ tmp.size = size;
+ ret = drm_mm_reserve_node(&mm, &tmp);
+ if (ret) {
+ pr_err("failed to reserve whole drm_mm\n");
+ goto out;
+ }
+
+ /* After filling the range entirely, there should be no holes */
+ if (!assert_no_holes(&mm)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* And then after emptying it again, the massive hole should be back */
+ drm_mm_remove_node(&tmp);
+ if (!assert_one_hole(&mm, 0, size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ if (ret)
+ show_mm(&mm);
+ drm_mm_takedown(&mm);
+ return ret;
+}
+
+static int igt_debug(void *ignored)
+{
+ struct drm_mm mm;
+ struct drm_mm_node nodes[2];
+ int ret;
+
+ /* Create a small drm_mm with a couple of nodes and a few holes, and
+ * check that the debug iterator doesn't explode over a trivial drm_mm.
+ */
+
+ drm_mm_init(&mm, 0, 4096);
+
+ memset(nodes, 0, sizeof(nodes));
+ nodes[0].start = 512;
+ nodes[0].size = 1024;
+ ret = drm_mm_reserve_node(&mm, &nodes[0]);
+ if (ret) {
+ pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
+ nodes[0].start, nodes[0].size);
+ return ret;
+ }
+
+ nodes[1].size = 1024;
+ nodes[1].start = 4096 - 512 - nodes[1].size;
+ ret = drm_mm_reserve_node(&mm, &nodes[1]);
+ if (ret) {
+ pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
+ nodes[1].start, nodes[1].size);
+ return ret;
+ }
+
+ show_mm(&mm);
+ return 0;
+}
+
+static struct drm_mm_node *set_node(struct drm_mm_node *node,
+ u64 start, u64 size)
+{
+ node->start = start;
+ node->size = size;
+ return node;
+}
+
+static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
+{
+ int err;
+
+ err = drm_mm_reserve_node(mm, node);
+ if (likely(err == -ENOSPC))
+ return true;
+
+ if (!err) {
+ pr_err("impossible reserve succeeded, node %llu + %llu\n",
+ node->start, node->size);
+ drm_mm_remove_node(node);
+ } else {
+ pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
+ err, -ENOSPC, node->start, node->size);
+ }
+ return false;
+}
+
+static bool check_reserve_boundaries(struct drm_mm *mm,
+ unsigned int count,
+ u64 size)
+{
+ const struct boundary {
+ u64 start, size;
+ const char *name;
+ } boundaries[] = {
+#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
+ B(0, 0),
+ B(-size, 0),
+ B(size, 0),
+ B(size * count, 0),
+ B(-size, size),
+ B(-size, -size),
+ B(-size, 2*size),
+ B(0, -size),
+ B(size, -size),
+ B(count*size, size),
+ B(count*size, -size),
+ B(count*size, count*size),
+ B(count*size, -count*size),
+ B(count*size, -(count+1)*size),
+ B((count+1)*size, size),
+ B((count+1)*size, -size),
+ B((count+1)*size, -2*size),
+#undef B
+ };
+ struct drm_mm_node tmp = {};
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
+ if (!expect_reserve_fail(mm,
+ set_node(&tmp,
+ boundaries[n].start,
+ boundaries[n].size))) {
+ pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
+ n, boundaries[n].name, count, size);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int __igt_reserve(unsigned int count, u64 size)
+{
+ DRM_RND_STATE(prng, random_seed);
+ struct drm_mm mm;
+ struct drm_mm_node tmp, *nodes, *node, *next;
+ unsigned int *order, n, m, o = 0;
+ int ret, err;
+
+ /* For exercising drm_mm_reserve_node(), we want to check that
+ * reservations outside of the drm_mm range are rejected, as are
+ * reservations overlapping already occupied ranges. Afterwards,
+ * the tree and nodes should be intact.
+ */
+
+ DRM_MM_BUG_ON(!count);
+ DRM_MM_BUG_ON(!size);
+
+ ret = -ENOMEM;
+ order = drm_random_order(count, &prng);
+ if (!order)
+ goto err;
+
+ nodes = vzalloc(sizeof(*nodes) * count);
+ if (!nodes)
+ goto err_order;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, count * size);
+
+ if (!check_reserve_boundaries(&mm, count, size))
+ goto out;
+
+ for (n = 0; n < count; n++) {
+ nodes[n].start = order[n] * size;
+ nodes[n].size = size;
+
+ err = drm_mm_reserve_node(&mm, &nodes[n]);
+ if (err) {
+ pr_err("reserve failed, step %d, start %llu\n",
+ n, nodes[n].start);
+ ret = err;
+ goto out;
+ }
+
+ if (!drm_mm_node_allocated(&nodes[n])) {
+ pr_err("reserved node not allocated! step %d, start %llu\n",
+ n, nodes[n].start);
+ goto out;
+ }
+
+ if (!expect_reserve_fail(&mm, &nodes[n]))
+ goto out;
+ }
+
+ /* After random insertion the nodes should be in order */
+ if (!assert_continuous(&mm, size))
+ goto out;
+
+ /* Repeated use should then fail */
+ drm_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ if (!expect_reserve_fail(&mm,
+ set_node(&tmp, order[n] * size, 1)))
+ goto out;
+
+ /* Remove and reinsert should work */
+ drm_mm_remove_node(&nodes[order[n]]);
+ err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
+ if (err) {
+ pr_err("reserve failed, step %d, start %llu\n",
+ n, nodes[n].start);
+ ret = err;
+ goto out;
+ }
+ }
+
+ if (!assert_continuous(&mm, size))
+ goto out;
+
+ /* Overlapping use should then fail */
+ for (n = 0; n < count; n++) {
+ if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
+ goto out;
+ }
+ for (n = 0; n < count; n++) {
+ if (!expect_reserve_fail(&mm,
+ set_node(&tmp,
+ size * n,
+ size * (count - n))))
+ goto out;
+ }
+
+ /* Remove several, reinsert, check full */
+ for_each_prime_number(n, min(max_prime, count)) {
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ drm_mm_remove_node(node);
+ }
+
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ err = drm_mm_reserve_node(&mm, node);
+ if (err) {
+ pr_err("reserve failed, step %d/%d, start %llu\n",
+ m, n, node->start);
+ ret = err;
+ goto out;
+ }
+ }
+
+ o += n;
+
+ if (!assert_continuous(&mm, size))
+ goto out;
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ vfree(nodes);
+err_order:
+ kfree(order);
+err:
+ return ret;
+}
+
+static int igt_reserve(void *ignored)
+{
+ const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
+ int n, ret;
+
+ for_each_prime_number_from(n, 1, 54) {
+ u64 size = BIT_ULL(n);
+
+ ret = __igt_reserve(count, size - 1);
+ if (ret)
+ return ret;
+
+ ret = __igt_reserve(count, size);
+ if (ret)
+ return ret;
+
+ ret = __igt_reserve(count, size + 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
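+
+/*
+ * The size sweep above recurs in several tests below: for each prime
+ * n in [1, 54), sizes 2^n - 1, 2^n and 2^n + 1 straddle a
+ * power-of-two boundary, covering aligned and just-misaligned sizes.
+ */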
+
+static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
+ u64 size, u64 alignment, unsigned long color,
+ const struct insert_mode *mode)
+{
+ int err;
+
+ err = drm_mm_insert_node_generic(mm, node,
+ size, alignment, color,
+ mode->mode);
+ if (err) {
+ pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
+ size, alignment, color, mode->name, err);
+ return false;
+ }
+
+ if (!assert_node(node, mm, size, alignment, color)) {
+ drm_mm_remove_node(node);
+ return false;
+ }
+
+ return true;
+}
+
+static bool expect_insert_fail(struct drm_mm *mm, u64 size)
+{
+ struct drm_mm_node tmp = {};
+ int err;
+
+ err = drm_mm_insert_node(mm, &tmp, size);
+ if (likely(err == -ENOSPC))
+ return true;
+
+ if (!err) {
+ pr_err("impossible insert succeeded, node %llu + %llu\n",
+ tmp.start, tmp.size);
+ drm_mm_remove_node(&tmp);
+ } else {
+ pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
+ err, -ENOSPC, size);
+ }
+ return false;
+}
+
+static int __igt_insert(unsigned int count, u64 size, bool replace)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int *order, n, m, o = 0;
+ int ret;
+
+ /* Fill a range with lots of nodes, check it doesn't fail too early */
+
+ DRM_MM_BUG_ON(!count);
+ DRM_MM_BUG_ON(!size);
+
+ ret = -ENOMEM;
+ nodes = vmalloc(count * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ order = drm_random_order(count, &prng);
+ if (!order)
+ goto err_nodes;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, count * size);
+
+ for (mode = insert_modes; mode->name; mode++) {
+ for (n = 0; n < count; n++) {
+ struct drm_mm_node tmp;
+
+ node = replace ? &tmp : &nodes[n];
+ memset(node, 0, sizeof(*node));
+ if (!expect_insert(&mm, node, size, 0, n, mode)) {
+ pr_err("%s insert failed, size %llu step %d\n",
+ mode->name, size, n);
+ goto out;
+ }
+
+ if (replace) {
+ drm_mm_replace_node(&tmp, &nodes[n]);
+ if (drm_mm_node_allocated(&tmp)) {
+ pr_err("replaced old-node still allocated! step %d\n",
+ n);
+ goto out;
+ }
+
+ if (!assert_node(&nodes[n], &mm, size, 0, n)) {
+ pr_err("replaced node did not inherit parameters, size %llu step %d\n",
+ size, n);
+ goto out;
+ }
+
+ if (tmp.start != nodes[n].start) {
+ pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
+ tmp.start, size,
+ nodes[n].start, nodes[n].size);
+ goto out;
+ }
+ }
+ }
+
+ /* After random insertion the nodes should be in order */
+ if (!assert_continuous(&mm, size))
+ goto out;
+
+ /* Repeated use should then fail */
+ if (!expect_insert_fail(&mm, size))
+ goto out;
+
+ /* Remove one and reinsert, as the only hole it should refill itself */
+ for (n = 0; n < count; n++) {
+ u64 addr = nodes[n].start;
+
+ drm_mm_remove_node(&nodes[n]);
+ if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
+ pr_err("%s reinsert failed, size %llu step %d\n",
+ mode->name, size, n);
+ goto out;
+ }
+
+ if (nodes[n].start != addr) {
+ pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
+ mode->name, n, addr, nodes[n].start);
+ goto out;
+ }
+
+ if (!assert_continuous(&mm, size))
+ goto out;
+ }
+
+ /* Remove several, reinsert, check full */
+ for_each_prime_number(n, min(max_prime, count)) {
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ drm_mm_remove_node(node);
+ }
+
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ if (!expect_insert(&mm, node, size, 0, n, mode)) {
+ pr_err("%s multiple reinsert failed, size %llu step %d\n",
+ mode->name, size, n);
+ goto out;
+ }
+ }
+
+ o += n;
+
+ if (!assert_continuous(&mm, size))
+ goto out;
+
+ if (!expect_insert_fail(&mm, size))
+ goto out;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+err:
+ return ret;
+}
+
+static int igt_insert(void *ignored)
+{
+ const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
+ unsigned int n;
+ int ret;
+
+ for_each_prime_number_from(n, 1, 54) {
+ u64 size = BIT_ULL(n);
+
+ ret = __igt_insert(count, size - 1, false);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert(count, size, false);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert(count, size + 1, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int igt_replace(void *ignored)
+{
+ const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
+ unsigned int n;
+ int ret;
+
+ /* Reuse igt_insert to exercise replacement by inserting a dummy node,
+ * then replacing it with the intended node. We want to check that
+ * the tree is intact and all the information we need is carried
+ * across to the target node.
+ */
+
+ for_each_prime_number_from(n, 1, 54) {
+ u64 size = BIT_ULL(n);
+
+ ret = __igt_insert(count, size - 1, true);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert(count, size, true);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert(count, size + 1, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ u64 size, u64 alignment, unsigned long color,
+ u64 range_start, u64 range_end,
+ const struct insert_mode *mode)
+{
+ int err;
+
+ err = drm_mm_insert_node_in_range(mm, node,
+ size, alignment, color,
+ range_start, range_end,
+ mode->mode);
+ if (err) {
+ pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
+ size, alignment, color, mode->name,
+ range_start, range_end, err);
+ return false;
+ }
+
+ if (!assert_node(node, mm, size, alignment, color)) {
+ drm_mm_remove_node(node);
+ return false;
+ }
+
+ return true;
+}
+
+static bool expect_insert_in_range_fail(struct drm_mm *mm,
+ u64 size,
+ u64 range_start,
+ u64 range_end)
+{
+ struct drm_mm_node tmp = {};
+ int err;
+
+ err = drm_mm_insert_node_in_range(mm, &tmp,
+ size, 0, 0,
+ range_start, range_end,
+ 0);
+ if (likely(err == -ENOSPC))
+ return true;
+
+ if (!err) {
+ pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
+ tmp.start, tmp.size, range_start, range_end);
+ drm_mm_remove_node(&tmp);
+ } else {
+ pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
+ err, -ENOSPC, size, range_start, range_end);
+ }
+
+ return false;
+}
+
+static bool assert_contiguous_in_range(struct drm_mm *mm,
+ u64 size,
+ u64 start,
+ u64 end)
+{
+ struct drm_mm_node *node;
+ unsigned int n;
+
+ if (!expect_insert_in_range_fail(mm, size, start, end))
+ return false;
+
+ n = div64_u64(start + size - 1, size);
+ drm_mm_for_each_node(node, mm) {
+ if (node->start < start || node->start + node->size > end) {
+ pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
+ n, node->start, node->start + node->size, start, end);
+ return false;
+ }
+
+ if (node->start != n * size) {
+ pr_err("node %d out of order, expected start %llx, found %llx\n",
+ n, n * size, node->start);
+ return false;
+ }
+
+ if (node->size != size) {
+ pr_err("node %d has wrong size, expected size %llx, found %llx\n",
+ n, size, node->size);
+ return false;
+ }
+
+ if (drm_mm_hole_follows(node) &&
+ drm_mm_hole_node_end(node) < end) {
+ pr_err("node %d is followed by a hole!\n", n);
+ return false;
+ }
+
+ n++;
+ }
+
+ drm_mm_for_each_node_in_range(node, mm, 0, start) {
+ if (node) {
+ pr_err("node before start: node=%llx+%llu, start=%llx\n",
+ node->start, node->size, start);
+ return false;
+ }
+ }
+
+ drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
+ if (node) {
+ pr_err("node after end: node=%llx+%llu, end=%llx\n",
+ node->start, node->size, end);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
+{
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int n, start_n, end_n;
+ int ret;
+
+ DRM_MM_BUG_ON(!count);
+ DRM_MM_BUG_ON(!size);
+ DRM_MM_BUG_ON(end <= start);
+
+ /* Very similar to __igt_insert(), but now instead of populating the
+ * full range of the drm_mm, we try to fill a small portion of it.
+ */
+
+ ret = -ENOMEM;
+ nodes = vzalloc(count * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, count * size);
+
+ start_n = div64_u64(start + size - 1, size);
+ end_n = div64_u64(end - size, size);
+
+ for (mode = insert_modes; mode->name; mode++) {
+ for (n = start_n; n <= end_n; n++) {
+ if (!expect_insert_in_range(&mm, &nodes[n],
+ size, size, n,
+ start, end, mode)) {
+ pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
+ mode->name, size, n,
+ start_n, end_n,
+ start, end);
+ goto out;
+ }
+ }
+
+ if (!assert_contiguous_in_range(&mm, size, start, end)) {
+ pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
+ mode->name, start, end, size);
+ goto out;
+ }
+
+ /* Remove one and reinsert, it should refill itself */
+ for (n = start_n; n <= end_n; n++) {
+ u64 addr = nodes[n].start;
+
+ drm_mm_remove_node(&nodes[n]);
+ if (!expect_insert_in_range(&mm, &nodes[n],
+ size, size, n,
+ start, end, mode)) {
+ pr_err("%s reinsert failed, step %d\n", mode->name, n);
+ goto out;
+ }
+
+ if (nodes[n].start != addr) {
+ pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
+ mode->name, n, addr, nodes[n].start);
+ goto out;
+ }
+ }
+
+ if (!assert_contiguous_in_range(&mm, size, start, end)) {
+ pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
+ mode->name, start, end, size);
+ goto out;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ vfree(nodes);
+err:
+ return ret;
+}
+
+static int insert_outside_range(void)
+{
+ struct drm_mm mm;
+ const unsigned int start = 1024;
+ const unsigned int end = 2048;
+ const unsigned int size = end - start;
+
+ drm_mm_init(&mm, start, size);
+
+ if (!expect_insert_in_range_fail(&mm, 1, 0, start))
+ return -EINVAL;
+
+ if (!expect_insert_in_range_fail(&mm, size,
+ start - size/2, start + (size+1)/2))
+ return -EINVAL;
+
+ if (!expect_insert_in_range_fail(&mm, size,
+ end - (size+1)/2, end + size/2))
+ return -EINVAL;
+
+ if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
+ return -EINVAL;
+
+ drm_mm_takedown(&mm);
+ return 0;
+}
+
+static int igt_insert_range(void *ignored)
+{
+ const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
+ unsigned int n;
+ int ret;
+
+ /* Check that requests outside the bounds of drm_mm are rejected. */
+ ret = insert_outside_range();
+ if (ret)
+ return ret;
+
+ for_each_prime_number_from(n, 1, 50) {
+ const u64 size = BIT_ULL(n);
+ const u64 max = count * size;
+
+ ret = __igt_insert_range(count, size, 0, max);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert_range(count, size, 1, max);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert_range(count, size, 0, max - 1);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert_range(count, size, 0, max/2);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert_range(count, size, max/2, max);
+ if (ret)
+ return ret;
+
+ ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int igt_align(void *ignored)
+{
+ const struct insert_mode *mode;
+ const unsigned int max_count = min(8192u, max_prime);
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int prime;
+ int ret = -EINVAL;
+
+ /* For each of the possible insertion modes, we pick a few
+ * arbitrary alignments and check that the inserted node
+ * meets our requirements.
+ */
+
+ nodes = vzalloc(max_count * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ drm_mm_init(&mm, 1, U64_MAX - 2);
+
+ for (mode = insert_modes; mode->name; mode++) {
+ unsigned int i = 0;
+
+ for_each_prime_number_from(prime, 1, max_count) {
+ u64 size = next_prime_number(prime);
+
+ if (!expect_insert(&mm, &nodes[i],
+ size, prime, i,
+ mode)) {
+ pr_err("%s insert failed with alignment=%d",
+ mode->name, prime);
+ goto out;
+ }
+
+ i++;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ vfree(nodes);
+err:
+ return ret;
+}
+
+static int igt_align_pot(int max)
+{
+ struct drm_mm mm;
+ struct drm_mm_node *node, *next;
+ int bit;
+ int ret = -EINVAL;
+
+ /* Check that we can align to the full u64 address space */
+
+ drm_mm_init(&mm, 1, U64_MAX - 2);
+
+ for (bit = max - 1; bit; bit--) {
+ u64 align, size;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ align = BIT_ULL(bit);
+ size = BIT_ULL(bit-1) + 1;
+ if (!expect_insert(&mm, node,
+ size, align, bit,
+ &insert_modes[0])) {
+ pr_err("insert failed with alignment=%llx [%d]",
+ align, bit);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm) {
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+ drm_mm_takedown(&mm);
+ return ret;
+}
+
+static int igt_align32(void *ignored)
+{
+ return igt_align_pot(32);
+}
+
+static int igt_align64(void *ignored)
+{
+ return igt_align_pot(64);
+}
+
+static void show_scan(const struct drm_mm_scan *scan)
+{
+ pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
+ scan->hit_start, scan->hit_end,
+ scan->size, scan->alignment, scan->color);
+}
+
+static void show_holes(const struct drm_mm *mm, int count)
+{
+ u64 hole_start, hole_end;
+ struct drm_mm_node *hole;
+
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ struct drm_mm_node *next = list_next_entry(hole, node_list);
+ const char *node1 = NULL, *node2 = NULL;
+
+ if (hole->allocated)
+ node1 = kasprintf(GFP_KERNEL,
+ "[%llx + %lld, color=%ld], ",
+ hole->start, hole->size, hole->color);
+
+ if (next->allocated)
+ node2 = kasprintf(GFP_KERNEL,
+ ", [%llx + %lld, color=%ld]",
+ next->start, next->size, next->color);
+
+ pr_info("%sHole [%llx - %llx, size %lld]%s\n",
+ node1,
+ hole_start, hole_end, hole_end - hole_start,
+ node2);
+
+ kfree(node2);
+ kfree(node1);
+
+ if (!--count)
+ break;
+ }
+}
+
+struct evict_node {
+ struct drm_mm_node node;
+ struct list_head link;
+};
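+
+/*
+ * drm_mm scan contract, as exercised by evict_nodes() below: every
+ * node handed to drm_mm_scan_add_block() must later be passed to
+ * drm_mm_scan_remove_block(), which returns true only for the nodes
+ * that actually need evicting to open the requested hole.
+ */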
+
+static bool evict_nodes(struct drm_mm_scan *scan,
+ struct evict_node *nodes,
+ unsigned int *order,
+ unsigned int count,
+ bool use_color,
+ struct list_head *evict_list)
+{
+ struct evict_node *e, *en;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ e = &nodes[order ? order[i] : i];
+ list_add(&e->link, evict_list);
+ if (drm_mm_scan_add_block(scan, &e->node))
+ break;
+ }
+ list_for_each_entry_safe(e, en, evict_list, link) {
+ if (!drm_mm_scan_remove_block(scan, &e->node))
+ list_del(&e->link);
+ }
+ if (list_empty(evict_list)) {
+ pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
+ scan->size, count, scan->alignment, scan->color);
+ return false;
+ }
+
+ list_for_each_entry(e, evict_list, link)
+ drm_mm_remove_node(&e->node);
+
+ if (use_color) {
+ struct drm_mm_node *node;
+
+ while ((node = drm_mm_scan_color_evict(scan))) {
+ e = container_of(node, typeof(*e), node);
+ drm_mm_remove_node(&e->node);
+ list_add(&e->link, evict_list);
+ }
+ } else {
+ if (drm_mm_scan_color_evict(scan)) {
+ pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool evict_nothing(struct drm_mm *mm,
+ unsigned int total_size,
+ struct evict_node *nodes)
+{
+ struct drm_mm_scan scan;
+ LIST_HEAD(evict_list);
+ struct evict_node *e;
+ struct drm_mm_node *node;
+ unsigned int n;
+
+ drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
+ for (n = 0; n < total_size; n++) {
+ e = &nodes[n];
+ list_add(&e->link, &evict_list);
+ drm_mm_scan_add_block(&scan, &e->node);
+ }
+ list_for_each_entry(e, &evict_list, link)
+ drm_mm_scan_remove_block(&scan, &e->node);
+
+ for (n = 0; n < total_size; n++) {
+ e = &nodes[n];
+
+ if (!drm_mm_node_allocated(&e->node)) {
+ pr_err("node[%d] no longer allocated!\n", n);
+ return false;
+ }
+
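+ /* Clear link.next so it can act as a marker: the loop over the
+ * mm below sets it again only for nodes still on the node list,
+ * letting the final pass spot any node that was dropped.
+ */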
+ e->link.next = NULL;
+ }
+
+ drm_mm_for_each_node(node, mm) {
+ e = container_of(node, typeof(*e), node);
+ e->link.next = &e->link;
+ }
+
+ for (n = 0; n < total_size; n++) {
+ e = &nodes[n];
+
+ if (!e->link.next) {
+ pr_err("node[%d] no longer connected!\n", n);
+ return false;
+ }
+ }
+
+ return assert_continuous(mm, nodes[0].node.size);
+}
+
+static bool evict_everything(struct drm_mm *mm,
+ unsigned int total_size,
+ struct evict_node *nodes)
+{
+ struct drm_mm_scan scan;
+ LIST_HEAD(evict_list);
+ struct evict_node *e;
+ unsigned int n;
+ int err;
+
+ drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
+ for (n = 0; n < total_size; n++) {
+ e = &nodes[n];
+ list_add(&e->link, &evict_list);
+ if (drm_mm_scan_add_block(&scan, &e->node))
+ break;
+ }
+
+ err = 0;
+ list_for_each_entry(e, &evict_list, link) {
+ if (!drm_mm_scan_remove_block(&scan, &e->node)) {
+ if (!err) {
+ pr_err("Node %lld not marked for eviction!\n",
+ e->node.start);
+ err = -EINVAL;
+ }
+ }
+ }
+ if (err)
+ return false;
+
+ list_for_each_entry(e, &evict_list, link)
+ drm_mm_remove_node(&e->node);
+
+ if (!assert_one_hole(mm, 0, total_size))
+ return false;
+
+ list_for_each_entry(e, &evict_list, link) {
+ err = drm_mm_reserve_node(mm, &e->node);
+ if (err) {
+ pr_err("Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
+ return false;
+ }
+ }
+
+ return assert_continuous(mm, nodes[0].node.size);
+}
+
+static int evict_something(struct drm_mm *mm,
+ u64 range_start, u64 range_end,
+ struct evict_node *nodes,
+ unsigned int *order,
+ unsigned int count,
+ unsigned int size,
+ unsigned int alignment,
+ const struct insert_mode *mode)
+{
+ struct drm_mm_scan scan;
+ LIST_HEAD(evict_list);
+ struct evict_node *e;
+ struct drm_mm_node tmp;
+ int err;
+
+ drm_mm_scan_init_with_range(&scan, mm,
+ size, alignment, 0,
+ range_start, range_end,
+ mode->mode);
+ if (!evict_nodes(&scan,
+ nodes, order, count, false,
+ &evict_list))
+ return -EINVAL;
+
+ memset(&tmp, 0, sizeof(tmp));
+ err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
+ DRM_MM_INSERT_EVICT);
+ if (err) {
+ pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
+ size, alignment);
+ show_scan(&scan);
+ show_holes(mm, 3);
+ return err;
+ }
+
+ if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
+ pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
+ tmp.start, tmp.size, range_start, range_end);
+ err = -EINVAL;
+ }
+
+ if (!assert_node(&tmp, mm, size, alignment, 0) ||
+ drm_mm_hole_follows(&tmp)) {
+ pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
+ tmp.size, size,
+ alignment, misalignment(&tmp, alignment),
+ tmp.start, drm_mm_hole_follows(&tmp));
+ err = -EINVAL;
+ }
+
+ drm_mm_remove_node(&tmp);
+ if (err)
+ return err;
+
+ list_for_each_entry(e, &evict_list, link) {
+ err = drm_mm_reserve_node(mm, &e->node);
+ if (err) {
+ pr_err("Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
+ return err;
+ }
+ }
+
+ if (!assert_continuous(mm, nodes[0].node.size)) {
+ pr_err("range is no longer continuous\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igt_evict(void *ignored)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int size = 8192;
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct evict_node *nodes;
+ struct drm_mm_node *node, *next;
+ unsigned int *order, n;
+ int ret, err;
+
+ /* Here we populate a full drm_mm and then try to insert a new node
+ * by evicting other nodes in a random order. The drm_mm_scan should
+ * pick the first matching hole it finds from the random list. We
+ * repeat that for different allocation strategies, alignments and
+ * sizes to stress the hole finder.
+ */
+
+ ret = -ENOMEM;
+ nodes = vzalloc(size * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ order = drm_random_order(size, &prng);
+ if (!order)
+ goto err_nodes;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, size);
+ for (n = 0; n < size; n++) {
+ err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
+ if (err) {
+ pr_err("insert failed, step %d\n", n);
+ ret = err;
+ goto out;
+ }
+ }
+
+ /* First check that using the scanner doesn't break the mm */
+ if (!evict_nothing(&mm, size, nodes)) {
+ pr_err("evict_nothing() failed\n");
+ goto out;
+ }
+ if (!evict_everything(&mm, size, nodes)) {
+ pr_err("evict_everything() failed\n");
+ goto out;
+ }
+
+ for (mode = evict_modes; mode->name; mode++) {
+ for (n = 1; n <= size; n <<= 1) {
+ drm_random_reorder(order, size, &prng);
+ err = evict_something(&mm, 0, U64_MAX,
+ nodes, order, size,
+ n, 1,
+ mode);
+ if (err) {
+ pr_err("%s evict_something(size=%u) failed\n",
+ mode->name, n);
+ ret = err;
+ goto out;
+ }
+ }
+
+ for (n = 1; n < size; n <<= 1) {
+ drm_random_reorder(order, size, &prng);
+ err = evict_something(&mm, 0, U64_MAX,
+ nodes, order, size,
+ size/2, n,
+ mode);
+ if (err) {
+ pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
+ mode->name, size/2, n);
+ ret = err;
+ goto out;
+ }
+ }
+
+ for_each_prime_number_from(n, 1, min(size, max_prime)) {
+ unsigned int nsize = (size - n + 1) / 2;
+
+ DRM_MM_BUG_ON(!nsize);
+
+ drm_random_reorder(order, size, &prng);
+ err = evict_something(&mm, 0, U64_MAX,
+ nodes, order, size,
+ nsize, n,
+ mode);
+ if (err) {
+ pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
+ mode->name, nsize, n);
+ ret = err;
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+err:
+ return ret;
+}
+
+static int igt_evict_range(void *ignored)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int size = 8192;
+ const unsigned int range_size = size / 2;
+ const unsigned int range_start = size / 4;
+ const unsigned int range_end = range_start + range_size;
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct evict_node *nodes;
+ struct drm_mm_node *node, *next;
+ unsigned int *order, n;
+ int ret, err;
+
+ /* Like igt_evict() but now we are limiting the search to a
+ * small portion of the full drm_mm.
+ */
+
+ ret = -ENOMEM;
+ nodes = vzalloc(size * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ order = drm_random_order(size, &prng);
+ if (!order)
+ goto err_nodes;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, size);
+ for (n = 0; n < size; n++) {
+ err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
+ if (err) {
+ pr_err("insert failed, step %d\n", n);
+ ret = err;
+ goto out;
+ }
+ }
+
+ for (mode = evict_modes; mode->name; mode++) {
+ for (n = 1; n <= range_size; n <<= 1) {
+ drm_random_reorder(order, size, &prng);
+ err = evict_something(&mm, range_start, range_end,
+ nodes, order, size,
+ n, 1,
+ mode);
+ if (err) {
+ pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
+ mode->name, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ for (n = 1; n <= range_size; n <<= 1) {
+ drm_random_reorder(order, size, &prng);
+ err = evict_something(&mm, range_start, range_end,
+ nodes, order, size,
+ range_size/2, n,
+ mode);
+ if (err) {
+ pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
+ mode->name, range_size/2, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
+ unsigned int nsize = (range_size - n + 1) / 2;
+
+ DRM_MM_BUG_ON(!nsize);
+
+ drm_random_reorder(order, size, &prng);
+ err = evict_something(&mm, range_start, range_end,
+ nodes, order, size,
+ nsize, n,
+ mode);
+ if (err) {
+ pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
+ mode->name, nsize, n, range_start, range_end);
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+err:
+ return ret;
+}
+
+static unsigned int node_index(const struct drm_mm_node *node)
+{
+ return div64_u64(node->start, node->size);
+}
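+
+/*
+ * The tests below place fixed-size nodes at start = index * size, so
+ * node_index() recovers the slot tracked in the eviction bitmaps of
+ * igt_topdown() and igt_bottomup().
+ */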
+
+static int igt_topdown(void *ignored)
+{
+ const struct insert_mode *topdown = &insert_modes[TOPDOWN];
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int count = 8192;
+ unsigned int size;
+ unsigned long *bitmap = NULL;
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int *order, n, m, o = 0;
+ int ret;
+
+ /* When allocating top-down, we expect to be returned a node
+ * from a suitable hole at the top of the drm_mm. We check that
+ * the returned node does match the highest available slot.
+ */
+
+ ret = -ENOMEM;
+ nodes = vzalloc(count * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+ GFP_TEMPORARY);
+ if (!bitmap)
+ goto err_nodes;
+
+ order = drm_random_order(count, &prng);
+ if (!order)
+ goto err_bitmap;
+
+ ret = -EINVAL;
+ for (size = 1; size <= 64; size <<= 1) {
+ drm_mm_init(&mm, 0, size*count);
+ for (n = 0; n < count; n++) {
+ if (!expect_insert(&mm, &nodes[n],
+ size, 0, n,
+ topdown)) {
+ pr_err("insert failed, size %u step %d\n", size, n);
+ goto out;
+ }
+
+ if (drm_mm_hole_follows(&nodes[n])) {
+ pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
+ n, nodes[n].start, size);
+ goto out;
+ }
+
+ if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
+ goto out;
+ }
+
+ if (!assert_continuous(&mm, size))
+ goto out;
+
+ drm_random_reorder(order, count, &prng);
+ for_each_prime_number_from(n, 1, min(count, max_prime)) {
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ drm_mm_remove_node(node);
+ __set_bit(node_index(node), bitmap);
+ }
+
+ for (m = 0; m < n; m++) {
+ unsigned int last;
+
+ node = &nodes[order[(o + m) % count]];
+ if (!expect_insert(&mm, node,
+ size, 0, 0,
+ topdown)) {
+ pr_err("insert failed, step %d/%d\n", m, n);
+ goto out;
+ }
+
+ if (drm_mm_hole_follows(node)) {
+ pr_err("hole after topdown insert %d/%d, start=%llx\n",
+ m, n, node->start);
+ goto out;
+ }
+
+ last = find_last_bit(bitmap, count);
+ if (node_index(node) != last) {
+ pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
+ m, n, size, last, node_index(node));
+ goto out;
+ }
+
+ __clear_bit(last, bitmap);
+ }
+
+ DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
+
+ o += n;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_bitmap:
+ kfree(bitmap);
+err_nodes:
+ vfree(nodes);
+err:
+ return ret;
+}
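
The bitmap in igt_topdown() records which slots were freed; since a correct top-down allocator must fill the highest free slot first, each reinsertion is compared against find_last_bit(). A compact userspace model of that expectation (illustrative only, not the kernel API):

#include <assert.h>
#include <stdbool.h>

#define NSLOTS 8

/* crude find_last_bit() analogue over a bool array */
static int last_free(const bool *freed, int n)
{
	while (n--)
		if (freed[n])
			return n;
	return -1;
}

int main(void)
{
	bool freed[NSLOTS] = { false };

	/* free slots 2 and 5: top-down must hand out 5, then 2 */
	freed[2] = freed[5] = true;

	assert(last_free(freed, NSLOTS) == 5);
	freed[5] = false;			/* slot 5 reallocated */
	assert(last_free(freed, NSLOTS) == 2);
	return 0;
}
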
+
+static int igt_bottomup(void *ignored)
+{
+ const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int count = 8192;
+ unsigned int size;
+ unsigned long *bitmap;
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int *order, n, m, o = 0;
+ int ret;
+
+ /* Like igt_topdown, but instead of searching for the last hole,
+ * we search for the first.
+ */
+
+ ret = -ENOMEM;
+ nodes = vzalloc(count * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+ GFP_TEMPORARY);
+ if (!bitmap)
+ goto err_nodes;
+
+ order = drm_random_order(count, &prng);
+ if (!order)
+ goto err_bitmap;
+
+ ret = -EINVAL;
+ for (size = 1; size <= 64; size <<= 1) {
+ drm_mm_init(&mm, 0, size*count);
+ for (n = 0; n < count; n++) {
+ if (!expect_insert(&mm, &nodes[n],
+ size, 0, n,
+ bottomup)) {
+ pr_err("bottomup insert failed, size %u step %d\n", size, n);
+ goto out;
+ }
+
+ if (!assert_one_hole(&mm, size*(n + 1), size*count))
+ goto out;
+ }
+
+ if (!assert_continuous(&mm, size))
+ goto out;
+
+ drm_random_reorder(order, count, &prng);
+ for_each_prime_number_from(n, 1, min(count, max_prime)) {
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ drm_mm_remove_node(node);
+ __set_bit(node_index(node), bitmap);
+ }
+
+ for (m = 0; m < n; m++) {
+ unsigned int first;
+
+ node = &nodes[order[(o + m) % count]];
+ if (!expect_insert(&mm, node,
+ size, 0, 0,
+ bottomup)) {
+ pr_err("insert failed, step %d/%d\n", m, n);
+ goto out;
+ }
+
+ first = find_first_bit(bitmap, count);
+ if (node_index(node) != first) {
+ pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
+ m, n, first, node_index(node));
+ goto out;
+ }
+ __clear_bit(first, bitmap);
+ }
+
+ DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
+
+ o += n;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_bitmap:
+ kfree(bitmap);
+err_nodes:
+ vfree(nodes);
+err:
+ return ret;
+}
+
+static void separate_adjacent_colors(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
+{
+ if (node->allocated && node->color != color)
+ ++*start;
+
+ node = list_next_entry(node, node_list);
+ if (node->allocated && node->color != color)
+ --*end;
+}
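
separate_adjacent_colors() is a color_adjust hook: drm_mm passes it the node in front of a candidate hole and lets it shrink the usable [*start, *end) range, which is how these tests force a one-unit gap between differently colored neighbours. The same contraction modelled standalone (simplified types; a sketch, not the kernel API):

#include <stdbool.h>
#include <stdint.h>

struct fake_node {
	bool allocated;
	unsigned long color;
};

/* shrink [start, end) by one unit per differently colored neighbour */
static void adjust(const struct fake_node *prev, const struct fake_node *next,
		   unsigned long color, uint64_t *start, uint64_t *end)
{
	if (prev->allocated && prev->color != color)
		++*start;
	if (next->allocated && next->color != color)
		--*end;
}

int main(void)
{
	struct fake_node prev = { true, 1 }, next = { true, 2 };
	uint64_t start = 0, end = 64;

	adjust(&prev, &next, 3, &start, &end);	/* both neighbours differ */
	return !(start == 1 && end == 63);
}
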
+
+static bool colors_abutt(const struct drm_mm_node *node)
+{
+ if (!drm_mm_hole_follows(node) &&
+ list_next_entry(node, node_list)->allocated) {
+ pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
+ node->color, node->start, node->size,
+ list_next_entry(node, node_list)->color,
+ list_next_entry(node, node_list)->start,
+ list_next_entry(node, node_list)->size);
+ return true;
+ }
+
+ return false;
+}
+
+static int igt_color(void *ignored)
+{
+ const unsigned int count = min(4096u, max_iterations);
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct drm_mm_node *node, *nn;
+ unsigned int n;
+ int ret = -EINVAL, err;
+
+ /* Color adjustment complicates everything. First we just check
+ * that when we insert a node we apply any color_adjustment callback.
+ * The callback we use should ensure that there is a gap between
+ * any two nodes, and so after each insertion we check that those
+ * holes are inserted and that they are preserved.
+ */
+
+ drm_mm_init(&mm, 0, U64_MAX);
+
+ for (n = 1; n <= count; n++) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!expect_insert(&mm, node,
+ n, 0, n,
+ &insert_modes[0])) {
+ pr_err("insert failed, step %d\n", n);
+ kfree(node);
+ goto out;
+ }
+ }
+
+ drm_mm_for_each_node_safe(node, nn, &mm) {
+ if (node->color != node->size) {
+ pr_err("invalid color stored: expected %lld, found %ld\n",
+ node->size, node->color);
+
+ goto out;
+ }
+
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+
+ /* Now, let's start experimenting with applying a color callback */
+ mm.color_adjust = separate_adjacent_colors;
+ for (mode = insert_modes; mode->name; mode++) {
+ u64 last;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ node->size = 1 + 2*count;
+ node->color = node->size;
+
+ err = drm_mm_reserve_node(&mm, node);
+ if (err) {
+ pr_err("initial reserve failed!\n");
+ ret = err;
+ goto out;
+ }
+
+ last = node->start + node->size;
+
+ for (n = 1; n <= count; n++) {
+ int rem;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ node->start = last;
+ node->size = n + count;
+ node->color = node->size;
+
+ err = drm_mm_reserve_node(&mm, node);
+ if (err != -ENOSPC) {
+ pr_err("reserve %d did not report color overlap! err=%d\n",
+ n, err);
+ goto out;
+ }
+
+ node->start += n + 1;
+ rem = misalignment(node, n + count);
+ node->start += n + count - rem;
+
+ err = drm_mm_reserve_node(&mm, node);
+ if (err) {
+ pr_err("reserve %d failed, err=%d\n", n, err);
+ ret = err;
+ goto out;
+ }
+
+ last = node->start + node->size;
+ }
+
+ for (n = 1; n <= count; n++) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!expect_insert(&mm, node,
+ n, n, n,
+ mode)) {
+ pr_err("%s insert failed, step %d\n",
+ mode->name, n);
+ kfree(node);
+ goto out;
+ }
+ }
+
+ drm_mm_for_each_node_safe(node, nn, &mm) {
+ u64 rem;
+
+ if (node->color != node->size) {
+ pr_err("%s invalid color stored: expected %lld, found %ld\n",
+ mode->name, node->size, node->color);
+
+ goto out;
+ }
+
+ if (colors_abutt(node))
+ goto out;
+
+ div64_u64_rem(node->start, node->size, &rem);
+ if (rem) {
+ pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
+ mode->name, node->start, node->size, rem);
+ goto out;
+ }
+
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, nn, &mm) {
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+ drm_mm_takedown(&mm);
+ return ret;
+}
+
+static int evict_color(struct drm_mm *mm,
+ u64 range_start, u64 range_end,
+ struct evict_node *nodes,
+ unsigned int *order,
+ unsigned int count,
+ unsigned int size,
+ unsigned int alignment,
+ unsigned long color,
+ const struct insert_mode *mode)
+{
+ struct drm_mm_scan scan;
+ LIST_HEAD(evict_list);
+ struct evict_node *e;
+ struct drm_mm_node tmp;
+ int err;
+
+ drm_mm_scan_init_with_range(&scan, mm,
+ size, alignment, color,
+ range_start, range_end,
+ mode->mode);
+ if (!evict_nodes(&scan,
+ nodes, order, count, true,
+ &evict_list))
+ return -EINVAL;
+
+ memset(&tmp, 0, sizeof(tmp));
+ err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
+ DRM_MM_INSERT_EVICT);
+ if (err) {
+ pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
+ size, alignment, color, err);
+ show_scan(&scan);
+ show_holes(mm, 3);
+ return err;
+ }
+
+ if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
+ pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
+ tmp.start, tmp.size, range_start, range_end);
+ err = -EINVAL;
+ }
+
+ if (colors_abutt(&tmp))
+ err = -EINVAL;
+
+ if (!assert_node(&tmp, mm, size, alignment, color)) {
+ pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
+ tmp.size, size,
+ alignment, misalignment(&tmp, alignment), tmp.start);
+ err = -EINVAL;
+ }
+
+ drm_mm_remove_node(&tmp);
+ if (err)
+ return err;
+
+ list_for_each_entry(e, &evict_list, link) {
+ err = drm_mm_reserve_node(mm, &e->node);
+ if (err) {
+ pr_err("Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
+ return err;
+ }
+ }
+
+ return 0;
+}
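
evict_color() exercises the full eviction protocol: initialise a ranged scan, feed candidates until drm_mm_scan_add_block() reports a usable hole, roll back every block with drm_mm_scan_remove_block() (keeping only the victims the scan selected), remove those victims, allocate with DRM_MM_INSERT_EVICT, and finally reserve the victims back. A sketch of the feeding/rollback loop that the evict_nodes() helper presumably follows — the helper's exact shape here is an assumption:

/* sketch only: assumed shape of the evict_nodes() helper */
static bool scan_for_hole(struct drm_mm_scan *scan,
			  struct evict_node *nodes, unsigned int count,
			  struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;
	bool found = false;

	for (i = 0; i < count; i++) {
		e = &nodes[i];
		list_add(&e->link, evict_list);	/* prepend: reverse order */
		if (drm_mm_scan_add_block(scan, &e->node)) {
			found = true;		/* hole is now available */
			break;
		}
	}

	/* every added block must be removed again, newest first */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);	/* not part of the hole */
	}

	return found;
}
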
+
+static int igt_color_evict(void *ignored)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int total_size = min(8192u, max_iterations);
+ const struct insert_mode *mode;
+ unsigned long color = 0;
+ struct drm_mm mm;
+ struct evict_node *nodes;
+ struct drm_mm_node *node, *next;
+ unsigned int *order, n;
+ int ret, err;
+
+ /* Check that the drm_mm_scan also honours color adjustment when
+ * choosing its victims to create a hole. Our color_adjust does not
+ * allow two nodes to be placed together without an intervening hole,
+ * enlarging the set of victims that must be evicted.
+ */
+
+ ret = -ENOMEM;
+ nodes = vzalloc(total_size * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ order = drm_random_order(total_size, &prng);
+ if (!order)
+ goto err_nodes;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, 2*total_size - 1);
+ mm.color_adjust = separate_adjacent_colors;
+ for (n = 0; n < total_size; n++) {
+ if (!expect_insert(&mm, &nodes[n].node,
+ 1, 0, color++,
+ &insert_modes[0])) {
+ pr_err("insert failed, step %d\n", n);
+ goto out;
+ }
+ }
+
+ for (mode = evict_modes; mode->name; mode++) {
+ for (n = 1; n <= total_size; n <<= 1) {
+ drm_random_reorder(order, total_size, &prng);
+ err = evict_color(&mm, 0, U64_MAX,
+ nodes, order, total_size,
+ n, 1, color++,
+ mode);
+ if (err) {
+ pr_err("%s evict_color(size=%u) failed\n",
+ mode->name, n);
+ goto out;
+ }
+ }
+
+ for (n = 1; n < total_size; n <<= 1) {
+ drm_random_reorder(order, total_size, &prng);
+ err = evict_color(&mm, 0, U64_MAX,
+ nodes, order, total_size,
+ total_size/2, n, color++,
+ mode);
+ if (err) {
+ pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
+ mode->name, total_size/2, n);
+ goto out;
+ }
+ }
+
+ for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
+ unsigned int nsize = (total_size - n + 1) / 2;
+
+ DRM_MM_BUG_ON(!nsize);
+
+ drm_random_reorder(order, total_size, &prng);
+ err = evict_color(&mm, 0, U64_MAX,
+ nodes, order, total_size,
+ nsize, n, color++,
+ mode);
+ if (err) {
+ pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
+ mode->name, nsize, n);
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+out:
+ if (ret)
+ show_mm(&mm);
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+err:
+ return ret;
+}
+
+static int igt_color_evict_range(void *ignored)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int total_size = 8192;
+ const unsigned int range_size = total_size / 2;
+ const unsigned int range_start = total_size / 4;
+ const unsigned int range_end = range_start + range_size;
+ const struct insert_mode *mode;
+ unsigned long color = 0;
+ struct drm_mm mm;
+ struct evict_node *nodes;
+ struct drm_mm_node *node, *next;
+ unsigned int *order, n;
+ int ret, err;
+
+ /* Like igt_color_evict(), but limited to small portion of the full
+ * drm_mm range.
+ */
+
+ ret = -ENOMEM;
+ nodes = vzalloc(total_size * sizeof(*nodes));
+ if (!nodes)
+ goto err;
+
+ order = drm_random_order(total_size, &prng);
+ if (!order)
+ goto err_nodes;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, 2*total_size - 1);
+ mm.color_adjust = separate_adjacent_colors;
+ for (n = 0; n < total_size; n++) {
+ if (!expect_insert(&mm, &nodes[n].node,
+ 1, 0, color++,
+ &insert_modes[0])) {
+ pr_err("insert failed, step %d\n", n);
+ goto out;
+ }
+ }
+
+ for (mode = evict_modes; mode->name; mode++) {
+ for (n = 1; n <= range_size; n <<= 1) {
+ drm_random_reorder(order, range_size, &prng);
+ err = evict_color(&mm, range_start, range_end,
+ nodes, order, total_size,
+ n, 1, color++,
+ mode);
+ if (err) {
+ pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
+ mode->name, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ for (n = 1; n < range_size; n <<= 1) {
+ drm_random_reorder(order, total_size, &prng);
+ err = evict_color(&mm, range_start, range_end,
+ nodes, order, total_size,
+ range_size/2, n, color++,
+ mode);
+ if (err) {
+ pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
+ mode->name, range_size/2, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
+ unsigned int nsize = (range_size - n + 1) / 2;
+
+ DRM_MM_BUG_ON(!nsize);
+
+ drm_random_reorder(order, total_size, &prng);
+ err = evict_color(&mm, range_start, range_end,
+ nodes, order, total_size,
+ nsize, n, color++,
+ mode);
+ if (err) {
+ pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
+ mode->name, nsize, n, range_start, range_end);
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+out:
+ if (ret)
+ show_mm(&mm);
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+err:
+ return ret;
+}
+
+#include "drm_selftest.c"
+
+static int __init test_drm_mm_init(void)
+{
+ int err;
+
+ while (!random_seed)
+ random_seed = get_random_int();
+
+ pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
+ random_seed, max_iterations, max_prime);
+ err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
+
+ return err > 0 ? 0 : err;
+}
+
+static void __exit test_drm_mm_exit(void)
+{
+}
+
+module_init(test_drm_mm_init);
+module_exit(test_drm_mm_exit);
+
+module_param(random_seed, uint, 0400);
+module_param(max_iterations, uint, 0400);
+module_param(max_prime, uint, 0400);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index dddbdd62bed0..445476551695 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -174,7 +174,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
if (scrtc->started)
return;
- format = shmob_drm_format_info(crtc->primary->fb->pixel_format);
+ format = shmob_drm_format_info(crtc->primary->fb->format->format);
if (WARN_ON(format == NULL))
return;
@@ -376,10 +376,10 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
const struct shmob_drm_format_info *format;
void *cache;
- format = shmob_drm_format_info(crtc->primary->fb->pixel_format);
+ format = shmob_drm_format_info(crtc->primary->fb->format->format);
if (format == NULL) {
dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n",
- crtc->primary->fb->pixel_format);
+ crtc->primary->fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
index 38ed4ff8aaf2..818b31549ddc 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -16,6 +16,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
struct backlight_device;
struct shmob_drm_device;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 38dd55f4af81..33cec3d42389 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -104,7 +104,7 @@ static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
* DRM operations
*/
-static int shmob_drm_unload(struct drm_device *dev)
+static void shmob_drm_unload(struct drm_device *dev)
{
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
@@ -112,8 +112,6 @@ static int shmob_drm_unload(struct drm_device *dev)
drm_irq_uninstall(dev);
dev->dev_private = NULL;
-
- return 0;
}
static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 1805bb23b113..2023a93cee2b 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -183,10 +183,10 @@ shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct shmob_drm_device *sdev = plane->dev->dev_private;
const struct shmob_drm_format_info *format;
- format = shmob_drm_format_info(fb->pixel_format);
+ format = shmob_drm_format_info(fb->format->format);
if (format == NULL) {
dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
- fb->pixel_format);
+ fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index a836451920f0..7f05da13ea5e 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -54,15 +54,13 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
return 0;
}
-static int sis_driver_unload(struct drm_device *dev)
+static void sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
-
- return 0;
}
static const struct file_operations sis_driver_fops = {
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 03defda77766..1622db24cd39 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,8 +109,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
if (pool == AGP_TYPE) {
retval = drm_mm_insert_node(&dev_priv->agp_mm,
&item->mm_node,
- mem->size, 0,
- DRM_MM_SEARCH_DEFAULT);
+ mem->size);
offset = item->mm_node.start;
} else {
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -122,8 +121,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
#else
retval = drm_mm_insert_node(&dev_priv->vram_mm,
&item->mm_node,
- mem->size, 0,
- DRM_MM_SEARCH_DEFAULT);
+ mem->size);
offset = item->mm_node.start;
#endif
}
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index d20f7c0b4eac..c35db12435c3 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -13,7 +13,6 @@ sti-drm-y := \
sti_dvo.o \
sti_awg_utils.o \
sti_vtg.o \
- sti_vtac.o \
sti_hda.o \
sti_tvout.o \
sti_hqvdp.o \
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index e992bed98dcb..d45a4335df5d 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -134,21 +134,6 @@ sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
}
-static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
-{
- struct sti_mixer *mixer = to_sti_mixer(crtc);
-
- if (crtc->state->event) {
- crtc->state->event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
- mixer->pending_event = crtc->state->event;
- crtc->state->event = NULL;
- }
-}
-
static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -156,6 +141,8 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
struct drm_plane *p;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
DRM_DEBUG_DRIVER("\n");
@@ -220,13 +207,24 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
break;
}
}
+
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
}
static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.enable = sti_crtc_enable,
.disable = sti_crtc_disabling,
.mode_set_nofb = sti_crtc_mode_set_nofb,
- .atomic_begin = sti_crtc_atomic_begin,
.atomic_flush = sti_crtc_atomic_flush,
};
@@ -250,7 +248,6 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
struct sti_compositor *compo;
struct drm_crtc *crtc = data;
struct sti_mixer *mixer;
- unsigned long flags;
struct sti_private *priv;
unsigned int pipe;
@@ -267,14 +264,6 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
drm_crtc_handle_vblank(crtc);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (mixer->pending_event) {
- drm_crtc_send_vblank_event(crtc, mixer->pending_event);
- drm_crtc_vblank_put(crtc);
- mixer->pending_event = NULL;
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
if (mixer->status == STI_MIXER_DISABLING) {
struct drm_plane *p;
@@ -317,19 +306,12 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
- struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
if (sti_vtg_unregister_client(vtg, vtg_vblank_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
-
- /* free the resources of the pending requests */
- if (compo->mixer[pipe]->pending_event) {
- drm_crtc_vblank_put(crtc);
- compo->mixer[pipe]->pending_event = NULL;
- }
}
static int sti_crtc_late_register(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index ff71e25ab5bf..20fc0fbfa849 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -58,7 +58,9 @@ static int sti_drm_fps_set(void *data, u64 val)
list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
+ memset(&plane->fps_info, 0, sizeof(plane->fps_info));
plane->fps_info.output = (val >> i) & 1;
+
i++;
}
@@ -89,38 +91,9 @@ static struct drm_info_list sti_drm_dbg_list[] = {
{"fps_get", sti_drm_fps_dbg_show, 0},
};
-static int sti_drm_debugfs_create(struct dentry *root,
- struct drm_minor *minor,
- const char *name,
- const struct file_operations *fops)
-{
- struct drm_device *dev = minor->dev;
- struct drm_info_node *node;
- struct dentry *ent;
-
- ent = debugfs_create_file(name, S_IRUGO | S_IWUSR, root, dev, fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- node = kmalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- debugfs_remove(ent);
- return -ENOMEM;
- }
-
- node->minor = minor;
- node->dent = ent;
- node->info_ent = (void *)fops;
-
- mutex_lock(&minor->debugfs_lock);
- list_add(&node->list, &minor->debugfs_list);
- mutex_unlock(&minor->debugfs_lock);
-
- return 0;
-}
-
static int sti_drm_dbg_init(struct drm_minor *minor)
{
+ struct dentry *dentry;
int ret;
ret = drm_debugfs_create_files(sti_drm_dbg_list,
@@ -129,10 +102,13 @@ static int sti_drm_dbg_init(struct drm_minor *minor)
if (ret)
goto err;
- ret = sti_drm_debugfs_create(minor->debugfs_root, minor, "fps_show",
+ dentry = debugfs_create_file("fps_show", S_IRUGO | S_IWUSR,
+ minor->debugfs_root, minor->dev,
&sti_drm_fps_fops);
- if (ret)
+ if (!dentry) {
+ ret = -ENOMEM;
goto err;
+ }
DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
return 0;
@@ -141,61 +117,6 @@ err:
return ret;
}
-static void sti_drm_dbg_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(sti_drm_dbg_list,
- ARRAY_SIZE(sti_drm_dbg_list), minor);
-
- drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops,
- 1, minor);
-}
-
-static void sti_atomic_schedule(struct sti_private *private,
- struct drm_atomic_state *state)
-{
- private->commit.state = state;
- schedule_work(&private->commit.work);
-}
-
-static void sti_atomic_complete(struct sti_private *private,
- struct drm_atomic_state *state)
-{
- struct drm_device *drm = private->drm_dev;
-
- /*
- * Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one condition: It must be guaranteed
- * that the asynchronous work has either been cancelled (if the driver
- * supports it, which at least requires that the framebuffers get
- * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
- * before the new state gets committed on the software side with
- * drm_atomic_helper_swap_state().
- *
- * This scheme allows new atomic state updates to be prepared and
- * checked in parallel to the asynchronous completion of the previous
- * update. Which is important since compositors need to figure out the
- * composition of the next frame right after having submitted the
- * current layout.
- */
-
- drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_planes(drm, state, 0);
- drm_atomic_helper_commit_modeset_enables(drm, state);
-
- drm_atomic_helper_wait_for_vblanks(drm, state);
-
- drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_put(state);
-}
-
-static void sti_atomic_work(struct work_struct *work)
-{
- struct sti_private *private = container_of(work,
- struct sti_private, commit.work);
-
- sti_atomic_complete(private, private->commit.state);
-}
-
static int sti_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -216,62 +137,18 @@ static int sti_atomic_check(struct drm_device *dev,
return ret;
}
-static int sti_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state, bool nonblock)
-{
- struct sti_private *private = drm->dev_private;
- int err;
-
- err = drm_atomic_helper_prepare_planes(drm, state);
- if (err)
- return err;
-
- /* serialize outstanding nonblocking commits */
- mutex_lock(&private->commit.lock);
- flush_work(&private->commit.work);
-
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- */
-
- drm_atomic_helper_swap_state(state, true);
-
- drm_atomic_state_get(state);
- if (nonblock)
- sti_atomic_schedule(private, state);
- else
- sti_atomic_complete(private, state);
-
- mutex_unlock(&private->commit.lock);
- return 0;
-}
-
static void sti_output_poll_changed(struct drm_device *ddev)
{
struct sti_private *private = ddev->dev_private;
- if (!ddev->mode_config.num_connector)
- return;
-
- if (private->fbdev) {
- drm_fbdev_cma_hotplug_event(private->fbdev);
- return;
- }
-
- private->fbdev = drm_fbdev_cma_init(ddev, 32,
- ddev->mode_config.num_crtc,
- ddev->mode_config.num_connector);
- if (IS_ERR(private->fbdev))
- private->fbdev = NULL;
+ drm_fbdev_cma_hotplug_event(private->fbdev);
}
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = sti_output_poll_changed,
.atomic_check = sti_atomic_check,
- .atomic_commit = sti_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void sti_mode_config_init(struct drm_device *dev)
@@ -326,7 +203,6 @@ static struct drm_driver sti_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.debugfs_init = sti_drm_dbg_init,
- .debugfs_cleanup = sti_drm_dbg_cleanup,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -352,9 +228,6 @@ static int sti_init(struct drm_device *ddev)
dev_set_drvdata(ddev->dev, ddev);
private->drm_dev = ddev;
- mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, sti_atomic_work);
-
drm_mode_config_init(ddev);
sti_mode_config_init(ddev);
@@ -375,6 +248,7 @@ static void sti_cleanup(struct drm_device *ddev)
drm_kms_helper_poll_fini(ddev);
drm_vblank_cleanup(ddev);
+ component_unbind_all(ddev->dev, ddev);
kfree(private);
ddev->dev_private = NULL;
}
@@ -382,6 +256,8 @@ static void sti_cleanup(struct drm_device *ddev)
static int sti_bind(struct device *dev)
{
struct drm_device *ddev;
+ struct sti_private *private;
+ struct drm_fbdev_cma *fbdev;
int ret;
ddev = drm_dev_alloc(&sti_driver, dev);
@@ -404,6 +280,17 @@ static int sti_bind(struct device *dev)
drm_mode_config_reset(ddev);
+ private = ddev->dev_private;
+ if (ddev->mode_config.num_connector) {
+ fbdev = drm_fbdev_cma_init(ddev, 32,
+ ddev->mode_config.num_connector);
+ if (IS_ERR(fbdev)) {
+ DRM_DEBUG_DRIVER("Warning: fails to create fbdev\n");
+ fbdev = NULL;
+ }
+ private->fbdev = fbdev;
+ }
+
return 0;
err_register:
@@ -476,7 +363,6 @@ static struct platform_driver sti_platform_driver = {
static struct platform_driver * const drivers[] = {
&sti_tvout_driver,
- &sti_vtac_driver,
&sti_hqvdp_driver,
&sti_hdmi_driver,
&sti_hda_driver,
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index 78ebe5e30f53..6502ed2d3351 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -25,16 +25,9 @@ struct sti_private {
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
struct drm_fbdev_cma *fbdev;
-
- struct {
- struct drm_atomic_state *state;
- struct work_struct work;
- struct mutex lock;
- } commit;
};
extern struct platform_driver sti_tvout_driver;
-extern struct platform_driver sti_vtac_driver;
extern struct platform_driver sti_hqvdp_driver;
extern struct platform_driver sti_hdmi_driver;
extern struct platform_driver sti_hda_driver;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index e8c1ed08a9f7..bb23318a44b7 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -195,13 +195,6 @@ static struct drm_info_list dvo_debugfs_files[] = {
{ "dvo", dvo_dbg_show, 0, NULL },
};
-static void dvo_debugfs_exit(struct sti_dvo *dvo, struct drm_minor *minor)
-{
- drm_debugfs_remove_files(dvo_debugfs_files,
- ARRAY_SIZE(dvo_debugfs_files),
- minor);
-}
-
static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
{
unsigned int i;
@@ -478,14 +471,13 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
return err;
}
- err = drm_bridge_attach(drm_dev, bridge);
+ err = drm_bridge_attach(encoder, bridge, NULL);
if (err) {
DRM_ERROR("Failed to attach bridge\n");
return err;
}
dvo->bridge = bridge;
- encoder->bridge = bridge;
connector->encoder = encoder;
dvo->encoder = encoder;
@@ -515,9 +507,6 @@ static void sti_dvo_unbind(struct device *dev,
struct device *master, void *data)
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- dvo_debugfs_exit(dvo, drm_dev->primary);
drm_bridge_remove(dvo->bridge);
}
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 81df3097b545..86279f5022c2 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -610,7 +610,6 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
struct drm_crtc *crtc = state->crtc;
- struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
struct drm_framebuffer *fb = state->fb;
struct drm_crtc_state *crtc_state;
struct sti_mixer *mixer;
@@ -636,10 +635,10 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
- format = sti_gdp_fourcc2format(fb->pixel_format);
+ format = sti_gdp_fourcc2format(fb->format->format);
if (format == -1) {
DRM_ERROR("Format not supported by GDP %.4s\n",
- (char *)&fb->pixel_format);
+ (char *)&fb->format->format);
return -EINVAL;
}
@@ -648,45 +647,30 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
return -EINVAL;
}
- if (!gdp->vtg) {
- /* Register gdp callback */
- gdp->vtg = compo->vtg[mixer->id];
- if (sti_vtg_register_client(gdp->vtg,
- &gdp->vtg_field_nb, crtc)) {
- DRM_ERROR("Cannot register VTG notifier\n");
+ /* Set gdp clock */
+ if (mode->clock && gdp->clk_pix) {
+ struct clk *clkp;
+ int rate = mode->clock * 1000;
+ int res;
+
+ /*
+ * According to the mixer used, the gdp pixel clock
+ * should have a different parent clock.
+ */
+ if (mixer->id == STI_MIXER_MAIN)
+ clkp = gdp->clk_main_parent;
+ else
+ clkp = gdp->clk_aux_parent;
+
+ if (clkp)
+ clk_set_parent(gdp->clk_pix, clkp);
+
+ res = clk_set_rate(gdp->clk_pix, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+ rate);
return -EINVAL;
}
-
- /* Set and enable gdp clock */
- if (gdp->clk_pix) {
- struct clk *clkp;
- int rate = mode->clock * 1000;
- int res;
-
- /*
- * According to the mixer used, the gdp pixel clock
- * should have a different parent clock.
- */
- if (mixer->id == STI_MIXER_MAIN)
- clkp = gdp->clk_main_parent;
- else
- clkp = gdp->clk_aux_parent;
-
- if (clkp)
- clk_set_parent(gdp->clk_pix, clkp);
-
- res = clk_set_rate(gdp->clk_pix, rate);
- if (res < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
- rate);
- return -EINVAL;
- }
-
- if (clk_prepare_enable(gdp->clk_pix)) {
- DRM_ERROR("Failed to prepare/enable gdp\n");
- return -EINVAL;
- }
- }
}
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
@@ -724,6 +708,31 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
if (!crtc || !fb)
return;
+ if ((oldstate->fb == state->fb) &&
+ (oldstate->crtc_x == state->crtc_x) &&
+ (oldstate->crtc_y == state->crtc_y) &&
+ (oldstate->crtc_w == state->crtc_w) &&
+ (oldstate->crtc_h == state->crtc_h) &&
+ (oldstate->src_x == state->src_x) &&
+ (oldstate->src_y == state->src_y) &&
+ (oldstate->src_w == state->src_w) &&
+ (oldstate->src_h == state->src_h)) {
+ /* No change since last update, do not post cmd */
+ DRM_DEBUG_DRIVER("No change, not posting cmd\n");
+ plane->status = STI_PLANE_UPDATED;
+ return;
+ }
+
+ if (!gdp->vtg) {
+ struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+ /* Register gdp callback */
+ gdp->vtg = compo->vtg[mixer->id];
+ sti_vtg_register_client(gdp->vtg, &gdp->vtg_field_nb, crtc);
+ clk_prepare_enable(gdp->clk_pix);
+ }
+
mode = &crtc->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
@@ -745,7 +754,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
/* build the top field */
top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
- format = sti_gdp_fourcc2format(fb->pixel_format);
+ format = sti_gdp_fourcc2format(fb->format->format);
top_field->gam_gdp_ctl |= format;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
@@ -753,11 +762,11 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
- (char *)&fb->pixel_format,
+ (char *)&fb->format->format,
(unsigned long)cma_obj->paddr);
/* pixel memory location */
- bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ bpp = fb->format->cpp[0];
top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
top_field->gam_gdp_pml += src_x * bpp;
top_field->gam_gdp_pml += src_y * fb->pitches[0];
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 96f336dd0e29..0c0a75bc8bc3 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -365,13 +365,6 @@ static struct drm_info_list hda_debugfs_files[] = {
{ "hda", hda_dbg_show, 0, NULL },
};
-static void hda_debugfs_exit(struct sti_hda *hda, struct drm_minor *minor)
-{
- drm_debugfs_remove_files(hda_debugfs_files,
- ARRAY_SIZE(hda_debugfs_files),
- minor);
-}
-
static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
{
unsigned int i;
@@ -707,9 +700,8 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hda;
bridge->funcs = &sti_hda_bridge_funcs;
- drm_bridge_attach(drm_dev, bridge);
+ drm_bridge_attach(encoder, bridge, NULL);
- encoder->bridge = bridge;
connector->encoder = encoder;
drm_connector = (struct drm_connector *)connector;
@@ -740,10 +732,6 @@ err_sysfs:
static void sti_hda_unbind(struct device *dev,
struct device *master, void *data)
{
- struct sti_hda *hda = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- hda_debugfs_exit(hda, drm_dev->primary);
}
static const struct component_ops sti_hda_ops = {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 376b0763c874..ce2dcba679d5 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -95,7 +95,6 @@
#define HDMI_CFG_HDCP_EN BIT(2)
#define HDMI_CFG_ESS_NOT_OESS BIT(3)
#define HDMI_CFG_H_SYNC_POL_NEG BIT(4)
-#define HDMI_CFG_SINK_TERM_DET_EN BIT(5)
#define HDMI_CFG_V_SYNC_POL_NEG BIT(6)
#define HDMI_CFG_422_EN BIT(8)
#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12)
@@ -159,7 +158,6 @@ struct sti_hdmi_connector {
struct drm_encoder *encoder;
struct sti_hdmi *hdmi;
struct drm_property *colorspace_property;
- struct drm_property *hdmi_mode_property;
};
#define to_sti_hdmi_connector(x) \
@@ -266,12 +264,9 @@ static void hdmi_config(struct sti_hdmi *hdmi)
/* Select encryption type and the framing mode */
conf |= HDMI_CFG_ESS_NOT_OESS;
- if (hdmi->hdmi_mode == HDMI_MODE_HDMI)
+ if (hdmi->hdmi_monitor)
conf |= HDMI_CFG_HDMI_NOT_DVI;
- /* Enable sink term detection */
- conf |= HDMI_CFG_SINK_TERM_DET_EN;
-
/* Set Hsync polarity */
if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) {
DRM_DEBUG_DRIVER("H Sync Negative\n");
@@ -607,9 +602,6 @@ static void hdmi_dbg_cfg(struct seq_file *s, int val)
tmp = val & HDMI_CFG_ESS_NOT_OESS;
DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable");
seq_puts(s, "\t\t\t\t\t");
- tmp = val & HDMI_CFG_SINK_TERM_DET_EN;
- DBGFS_PRINT_STR("Sink term detection:", tmp ? "enable" : "disable");
- seq_puts(s, "\t\t\t\t\t");
tmp = val & HDMI_CFG_H_SYNC_POL_NEG;
DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal");
seq_puts(s, "\t\t\t\t\t");
@@ -731,13 +723,6 @@ static struct drm_info_list hdmi_debugfs_files[] = {
{ "hdmi", hdmi_dbg_show, 0, NULL },
};
-static void hdmi_debugfs_exit(struct sti_hdmi *hdmi, struct drm_minor *minor)
-{
- drm_debugfs_remove_files(hdmi_debugfs_files,
- ARRAY_SIZE(hdmi_debugfs_files),
- minor);
-}
-
static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
{
unsigned int i;
@@ -788,6 +773,95 @@ static void sti_hdmi_disable(struct drm_bridge *bridge)
hdmi->enabled = false;
}
+/**
+ * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
+ * clocks. Non-coherent clocks means that the audio and TMDS clocks do not
+ * share the same source (they drift relative to each other). In this case
+ * the assumption is that the CTS value is calculated automatically by the
+ * hardware.
+ *
+ * @audio_fs: audio frame clock frequency in Hz
+ *
+ * The values computed are based on the table described in the HDMI 1.4b
+ * specification.
+ *
+ * Returns the N value.
+ */
+static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
+{
+ unsigned int n;
+
+ switch (audio_fs) {
+ case 32000:
+ n = 4096;
+ break;
+ case 44100:
+ n = 6272;
+ break;
+ case 48000:
+ n = 6144;
+ break;
+ case 88200:
+ n = 6272 * 2;
+ break;
+ case 96000:
+ n = 6144 * 2;
+ break;
+ case 176400:
+ n = 6272 * 4;
+ break;
+ case 192000:
+ n = 6144 * 4;
+ break;
+ default:
+ /* Not pre-defined, recommended value: 128 * fs / 1000 */
+ n = (audio_fs * 128) / 1000;
+ }
+
+ return n;
+}
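
For non-coherent clocks the sink regenerates the audio clock from the transmitted N and a CTS value the hardware measures, following 128 * fs = f_TMDS * N / CTS from the HDMI 1.4b spec. A quick userspace cross-check of that relationship (clock values here are illustrative, not taken from the driver):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t tmds_hz = 148500000;	/* e.g. a 1080p60 TMDS clock */
	uint64_t fs = 48000, n = 6144;	/* table entry above */

	/* CTS the hardware would have to measure: f_TMDS * N / (128 * fs) */
	uint64_t cts = tmds_hz * n / (128 * fs);

	assert(cts == 148500);
	/* sink-side regeneration lands exactly on 128 * fs */
	assert(tmds_hz * n / cts == 128 * fs);
	return 0;
}
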
+
+static int hdmi_audio_configure(struct sti_hdmi *hdmi)
+{
+ int audio_cfg, n;
+ struct hdmi_audio_params *params = &hdmi->audio;
+ struct hdmi_audio_infoframe *info = &params->cea;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!hdmi->enabled)
+ return 0;
+
+ /* update N parameter */
+ n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
+
+ DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
+ params->sample_rate, hdmi->mode.clock * 1000, n);
+ hdmi_write(hdmi, n, HDMI_AUDN);
+
+ /* update HDMI registers according to configuration */
+ audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+ HDMI_AUD_CFG_ONE_BIT_INVALID;
+
+ switch (info->channels) {
+ case 8:
+ audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
+ case 6:
+ audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
+ case 4:
+ audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
+ case 2:
+ audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
+ break;
+ default:
+ DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
+ info->channels);
+ return -EINVAL;
+ }
+
+ hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+ return hdmi_audio_infoframe_config(hdmi);
+}
+
static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
{
struct sti_hdmi *hdmi = bridge->driver_private;
@@ -826,9 +900,12 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
if (hdmi_avi_infoframe_config(hdmi))
DRM_ERROR("Unable to configure AVI infoframe\n");
- /* Program AUDIO infoframe */
- if (hdmi_audio_infoframe_config(hdmi))
- DRM_ERROR("Unable to configure AUDIO infoframe\n");
+ if (hdmi->audio.enabled) {
+ if (hdmi_audio_configure(hdmi))
+ DRM_ERROR("Unable to configure audio\n");
+ } else {
+ hdmi_audio_infoframe_config(hdmi);
+ }
/* Program VS infoframe */
if (hdmi_vendor_infoframe_config(hdmi))
@@ -892,6 +969,11 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
if (!edid)
goto fail;
+ hdmi->hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ DRM_DEBUG_KMS("%s : %dx%d cm\n",
+ (hdmi->hdmi_monitor ? "hdmi monitor" : "dvi monitor"),
+ edid->width_cm, edid->height_cm);
+
count = drm_add_edid_modes(connector, edid);
drm_mode_connector_update_edid_property(connector, edid);
drm_edid_to_eld(connector, edid);
@@ -975,19 +1057,6 @@ static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
}
hdmi_connector->colorspace_property = prop;
drm_object_attach_property(&connector->base, prop, hdmi->colorspace);
-
- /* hdmi_mode property */
- hdmi->hdmi_mode = DEFAULT_HDMI_MODE;
- prop = drm_property_create_enum(drm_dev, 0, "hdmi_mode",
- hdmi_mode_names,
- ARRAY_SIZE(hdmi_mode_names));
- if (!prop) {
- DRM_ERROR("fails to create colorspace property\n");
- return;
- }
- hdmi_connector->hdmi_mode_property = prop;
- drm_object_attach_property(&connector->base, prop, hdmi->hdmi_mode);
-
}
static int
@@ -1005,11 +1074,6 @@ sti_hdmi_connector_set_property(struct drm_connector *connector,
return 0;
}
- if (property == hdmi_connector->hdmi_mode_property) {
- hdmi->hdmi_mode = val;
- return 0;
- }
-
DRM_ERROR("failed to set hdmi connector property\n");
return -EINVAL;
}
@@ -1029,11 +1093,6 @@ sti_hdmi_connector_get_property(struct drm_connector *connector,
return 0;
}
- if (property == hdmi_connector->hdmi_mode_property) {
- *val = hdmi->hdmi_mode;
- return 0;
- }
-
DRM_ERROR("failed to get hdmi connector property\n");
return -EINVAL;
}
@@ -1078,97 +1137,6 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
return NULL;
}
-/**
- * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
- * clocks. None-coherent clocks means that audio and TMDS clocks have not the
- * same source (drifts between clocks). In this case assumption is that CTS is
- * automatically calculated by hardware.
- *
- * @audio_fs: audio frame clock frequency in Hz
- *
- * Values computed are based on table described in HDMI specification 1.4b
- *
- * Returns n value.
- */
-static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
-{
- unsigned int n;
-
- switch (audio_fs) {
- case 32000:
- n = 4096;
- break;
- case 44100:
- n = 6272;
- break;
- case 48000:
- n = 6144;
- break;
- case 88200:
- n = 6272 * 2;
- break;
- case 96000:
- n = 6144 * 2;
- break;
- case 176400:
- n = 6272 * 4;
- break;
- case 192000:
- n = 6144 * 4;
- break;
- default:
- /* Not pre-defined, recommended value: 128 * fs / 1000 */
- n = (audio_fs * 128) / 1000;
- }
-
- return n;
-}
-
-static int hdmi_audio_configure(struct sti_hdmi *hdmi,
- struct hdmi_audio_params *params)
-{
- int audio_cfg, n;
- struct hdmi_audio_infoframe *info = &params->cea;
-
- DRM_DEBUG_DRIVER("\n");
-
- if (!hdmi->enabled)
- return 0;
-
- /* update N parameter */
- n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
-
- DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
- params->sample_rate, hdmi->mode.clock * 1000, n);
- hdmi_write(hdmi, n, HDMI_AUDN);
-
- /* update HDMI registers according to configuration */
- audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
- HDMI_AUD_CFG_ONE_BIT_INVALID;
-
- switch (info->channels) {
- case 8:
- audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
- case 6:
- audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
- case 4:
- audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
- case 2:
- audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
- break;
- default:
- DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
- info->channels);
- return -EINVAL;
- }
-
- hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
-
- hdmi->audio = *params;
-
- return hdmi_audio_infoframe_config(hdmi);
-}
-
static void hdmi_audio_shutdown(struct device *dev, void *data)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1192,17 +1160,9 @@ static int hdmi_audio_hw_params(struct device *dev,
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
int ret;
- struct hdmi_audio_params audio = {
- .sample_width = params->sample_width,
- .sample_rate = params->sample_rate,
- .cea = params->cea,
- };
DRM_DEBUG_DRIVER("\n");
- if (!hdmi->enabled)
- return 0;
-
if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
daifmt->frame_clk_inv || daifmt->bit_clk_master ||
daifmt->frame_clk_master) {
@@ -1213,9 +1173,13 @@ static int hdmi_audio_hw_params(struct device *dev,
return -EINVAL;
}
- audio.enabled = true;
+ hdmi->audio.sample_width = params->sample_width;
+ hdmi->audio.sample_rate = params->sample_rate;
+ hdmi->audio.cea = params->cea;
+
+ hdmi->audio.enabled = true;
- ret = hdmi_audio_configure(hdmi, &audio);
+ ret = hdmi_audio_configure(hdmi);
if (ret < 0)
return ret;
@@ -1308,9 +1272,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hdmi;
bridge->funcs = &sti_hdmi_bridge_funcs;
- drm_bridge_attach(drm_dev, bridge);
+ drm_bridge_attach(encoder, bridge, NULL);
- encoder->bridge = bridge;
connector->encoder = encoder;
drm_connector = (struct drm_connector *)connector;
@@ -1360,10 +1323,6 @@ err_sysfs:
static void sti_hdmi_unbind(struct device *dev,
struct device *master, void *data)
{
- struct sti_hdmi *hdmi = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- hdmi_debugfs_exit(hdmi, drm_dev->primary);
}
static const struct component_ops sti_hdmi_ops = {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 119bc3582ac7..407012350f1a 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -30,19 +30,6 @@ struct hdmi_audio_params {
struct hdmi_audio_infoframe cea;
};
-/* values for the framing mode property */
-enum sti_hdmi_modes {
- HDMI_MODE_HDMI,
- HDMI_MODE_DVI,
-};
-
-static const struct drm_prop_enum_list hdmi_mode_names[] = {
- { HDMI_MODE_HDMI, "hdmi" },
- { HDMI_MODE_DVI, "dvi" },
-};
-
-#define DEFAULT_HDMI_MODE HDMI_MODE_HDMI
-
static const struct drm_prop_enum_list colorspace_mode_names[] = {
{ HDMI_COLORSPACE_RGB, "rgb" },
{ HDMI_COLORSPACE_YUV422, "yuv422" },
@@ -73,7 +60,7 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
* @reset: reset control of the hdmi phy
* @ddc_adapt: i2c ddc adapter
* @colorspace: current colorspace selected
- * @hdmi_mode: select framing for HDMI or DVI
+ * @hdmi_monitor: true if an HDMI monitor is detected, otherwise a DVI monitor is assumed
* @audio_pdev: ASoC hdmi-codec platform device
* @audio: hdmi audio parameters.
* @drm_connector: hdmi connector
@@ -98,7 +85,7 @@ struct sti_hdmi {
struct reset_control *reset;
struct i2c_adapter *ddc_adapt;
enum hdmi_colorspace colorspace;
- enum sti_hdmi_modes hdmi_mode;
+ bool hdmi_monitor;
struct platform_device *audio_pdev;
struct hdmi_audio_params audio;
struct drm_connector *drm_connector;
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index f88130f2eb48..66f843148ef7 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -332,6 +332,7 @@ struct sti_hqvdp_cmd {
* @hqvdp_cmd_paddr: physical address of hqvdp_cmd
* @vtg: vtg for main data path
* @xp70_initialized: true if xp70 is already initialized
+ * @vtg_registered: true if registered to VTG
*/
struct sti_hqvdp {
struct device *dev;
@@ -347,6 +348,7 @@ struct sti_hqvdp {
u32 hqvdp_cmd_paddr;
struct sti_vtg *vtg;
bool xp70_initialized;
+ bool vtg_registered;
};
#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
@@ -771,7 +773,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
DRM_ERROR("XP70 could not revert to idle\n");
hqvdp->plane.status = STI_PLANE_DISABLED;
- hqvdp->xp70_initialized = false;
+ hqvdp->vtg_registered = false;
}
/**
@@ -1035,9 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
- if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
- src_w, src_h,
- dst_w, dst_h)) {
+ if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode,
+ src_w, src_h,
+ dst_w, dst_h)) {
DRM_ERROR("Scaling beyond HW capabilities\n");
return -EINVAL;
}
@@ -1064,10 +1066,11 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
return -EINVAL;
}
- if (!hqvdp->xp70_initialized) {
+ if (!hqvdp->xp70_initialized)
/* Start HQVDP XP70 coprocessor */
sti_hqvdp_start_xp70(hqvdp);
+ if (!hqvdp->vtg_registered) {
/* Prevent VTG shutdown */
if (clk_prepare_enable(hqvdp->clk_pix_main)) {
DRM_ERROR("Failed to prepare/enable pix main clk\n");
@@ -1081,6 +1084,7 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
DRM_ERROR("Cannot register VTG notifier\n");
return -EINVAL;
}
+ hqvdp->vtg_registered = true;
}
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
@@ -1113,6 +1117,21 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
if (!crtc || !fb)
return;
+ if ((oldstate->fb == state->fb) &&
+ (oldstate->crtc_x == state->crtc_x) &&
+ (oldstate->crtc_y == state->crtc_y) &&
+ (oldstate->crtc_w == state->crtc_w) &&
+ (oldstate->crtc_h == state->crtc_h) &&
+ (oldstate->src_x == state->src_x) &&
+ (oldstate->src_y == state->src_y) &&
+ (oldstate->src_w == state->src_w) &&
+ (oldstate->src_h == state->src_h)) {
+ /* No change since last update, do not post cmd */
+ DRM_DEBUG_DRIVER("No change, not posting cmd\n");
+ plane->status = STI_PLANE_UPDATED;
+ return;
+ }
+
mode = &crtc->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
@@ -1147,7 +1166,7 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
- (char *)&fb->pixel_format,
+ (char *)&fb->format->format,
(unsigned long)cma_obj->paddr);
/* Buffer planes address */
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 830a3c42d886..e64a00e61049 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -28,7 +28,6 @@ enum sti_mixer_status {
* @regs: mixer registers
* @id: id of the mixer
* @drm_crtc: crtc object link to the mixer
- * @pending_event: set if a flip event is pending on crtc
* @status: to know the status of the mixer
*/
struct sti_mixer {
@@ -36,7 +35,6 @@ struct sti_mixer {
void __iomem *regs;
int id;
struct drm_crtc drm_crtc;
- struct drm_pending_vblank_event *pending_event;
enum sti_mixer_status status;
};
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index ca4b3719a64a..427d8f58c6b1 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -65,9 +65,18 @@ void sti_plane_update_fps(struct sti_plane *plane,
fps->last_timestamp = now;
fps->last_frame_counter = fps->curr_frame_counter;
- fpks = (num_frames * 1000000) / ms_since_last;
- snprintf(plane->fps_info.fps_str, FPS_LENGTH, "%-6s @ %d.%.3d fps",
- sti_plane_to_str(plane), fpks / 1000, fpks % 1000);
+
+ if (plane->drm_plane.fb) {
+ fpks = (num_frames * 1000000) / ms_since_last;
+ snprintf(plane->fps_info.fps_str, FPS_LENGTH,
+ "%-8s %4dx%-4d %.4s @ %3d.%-3.3d fps (%s)",
+ plane->drm_plane.name,
+ plane->drm_plane.fb->width,
+ plane->drm_plane.fb->height,
+ (char *)&plane->drm_plane.fb->format->format,
+ fpks / 1000, fpks % 1000,
+ sti_plane_to_str(plane));
+ }
if (fps->curr_field_counter) {
/* Compute number of field updates */
@@ -75,7 +84,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
fps->last_field_counter = fps->curr_field_counter;
fipks = (num_fields * 1000000) / ms_since_last;
snprintf(plane->fps_info.fips_str,
- FPS_LENGTH, " - %d.%.3d field/sec",
+ FPS_LENGTH, " - %3d.%-3.3d field/sec",
fipks / 1000, fipks % 1000);
} else {
plane->fps_info.fips_str[0] = '\0';
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index ce3e8d6c88bb..c36c13faaa18 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -48,7 +48,7 @@ enum sti_plane_status {
STI_PLANE_DISABLED,
};
-#define FPS_LENGTH 64
+#define FPS_LENGTH 128
struct sti_fps_info {
bool output;
unsigned int curr_frame_counter;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index ad46d3558d91..8b8ea717c121 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -567,13 +567,6 @@ static struct drm_info_list tvout_debugfs_files[] = {
{ "tvout", tvout_dbg_show, 0, NULL },
};
-static void tvout_debugfs_exit(struct sti_tvout *tvout, struct drm_minor *minor)
-{
- drm_debugfs_remove_files(tvout_debugfs_files,
- ARRAY_SIZE(tvout_debugfs_files),
- minor);
-}
-
static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
{
unsigned int i;
@@ -627,7 +620,6 @@ static void sti_tvout_early_unregister(struct drm_encoder *encoder)
if (!tvout->debugfs_registered)
return;
- tvout_debugfs_exit(tvout, encoder->dev->primary);
tvout->debugfs_registered = false;
}
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
deleted file mode 100644
index cf7fe8a1db42..000000000000
--- a/drivers/gpu/drm/sti/sti_vtac.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-#include <drm/drmP.h>
-
-#include "sti_drv.h"
-
-/* registers offset */
-#define VTAC_CONFIG 0x00
-#define VTAC_RX_FIFO_CONFIG 0x04
-#define VTAC_FIFO_CONFIG_VAL 0x04
-
-#define VTAC_SYS_CFG8521 0x824
-#define VTAC_SYS_CFG8522 0x828
-
-/* Number of phyts per pixel */
-#define VTAC_2_5_PPP 0x0005
-#define VTAC_3_PPP 0x0006
-#define VTAC_4_PPP 0x0008
-#define VTAC_5_PPP 0x000A
-#define VTAC_6_PPP 0x000C
-#define VTAC_13_PPP 0x001A
-#define VTAC_14_PPP 0x001C
-#define VTAC_15_PPP 0x001E
-#define VTAC_16_PPP 0x0020
-#define VTAC_17_PPP 0x0022
-#define VTAC_18_PPP 0x0024
-
-/* enable bits */
-#define VTAC_ENABLE 0x3003
-
-#define VTAC_TX_PHY_ENABLE_CLK_PHY BIT(0)
-#define VTAC_TX_PHY_ENABLE_CLK_DLL BIT(1)
-#define VTAC_TX_PHY_PLL_NOT_OSC_MODE BIT(3)
-#define VTAC_TX_PHY_RST_N_DLL_SWITCH BIT(4)
-#define VTAC_TX_PHY_PROG_N3 BIT(9)
-
-
-/**
- * VTAC mode structure
- *
- * @vid_in_width: Video Data Resolution
- * @phyts_width: Width of phyt buses(phyt low and phyt high).
- * @phyts_per_pixel: Number of phyts sent per pixel
- */
-struct sti_vtac_mode {
- u32 vid_in_width;
- u32 phyts_width;
- u32 phyts_per_pixel;
-};
-
-static const struct sti_vtac_mode vtac_mode_main = {
- .vid_in_width = 0x2,
- .phyts_width = 0x2,
- .phyts_per_pixel = VTAC_5_PPP,
-};
-static const struct sti_vtac_mode vtac_mode_aux = {
- .vid_in_width = 0x1,
- .phyts_width = 0x0,
- .phyts_per_pixel = VTAC_17_PPP,
-};
-
-/**
- * VTAC structure
- *
- * @dev: pointer to device structure
- * @regs: ioremapped registers for RX and TX devices
- * @phy_regs: phy registers for TX device
- * @clk: clock
- * @mode: main or auxillary configuration mode
- */
-struct sti_vtac {
- struct device *dev;
- void __iomem *regs;
- void __iomem *phy_regs;
- struct clk *clk;
- const struct sti_vtac_mode *mode;
-};
-
-static void sti_vtac_rx_set_config(struct sti_vtac *vtac)
-{
- u32 config;
-
- /* Enable VTAC clock */
- if (clk_prepare_enable(vtac->clk))
- DRM_ERROR("Failed to prepare/enable vtac_rx clock.\n");
-
- writel(VTAC_FIFO_CONFIG_VAL, vtac->regs + VTAC_RX_FIFO_CONFIG);
-
- config = VTAC_ENABLE;
- config |= vtac->mode->vid_in_width << 4;
- config |= vtac->mode->phyts_width << 16;
- config |= vtac->mode->phyts_per_pixel << 23;
- writel(config, vtac->regs + VTAC_CONFIG);
-}
-
-static void sti_vtac_tx_set_config(struct sti_vtac *vtac)
-{
- u32 phy_config;
- u32 config;
-
- /* Enable VTAC clock */
- if (clk_prepare_enable(vtac->clk))
- DRM_ERROR("Failed to prepare/enable vtac_tx clock.\n");
-
- /* Configure vtac phy */
- phy_config = 0x00000000;
- writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8522);
- phy_config = VTAC_TX_PHY_ENABLE_CLK_PHY;
- writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
- phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
- phy_config |= VTAC_TX_PHY_PROG_N3;
- writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
- phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
- phy_config |= VTAC_TX_PHY_ENABLE_CLK_DLL;
- writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
- phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
- phy_config |= VTAC_TX_PHY_RST_N_DLL_SWITCH;
- writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
- phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
- phy_config |= VTAC_TX_PHY_PLL_NOT_OSC_MODE;
- writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
-
- /* Configure vtac tx */
- config = VTAC_ENABLE;
- config |= vtac->mode->vid_in_width << 4;
- config |= vtac->mode->phyts_width << 16;
- config |= vtac->mode->phyts_per_pixel << 23;
- writel(config, vtac->regs + VTAC_CONFIG);
-}
-
-static const struct of_device_id vtac_of_match[] = {
- {
- .compatible = "st,vtac-main",
- .data = &vtac_mode_main,
- }, {
- .compatible = "st,vtac-aux",
- .data = &vtac_mode_aux,
- }, {
- /* end node */
- }
-};
-MODULE_DEVICE_TABLE(of, vtac_of_match);
-
-static int sti_vtac_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- const struct of_device_id *id;
- struct sti_vtac *vtac;
- struct resource *res;
-
- vtac = devm_kzalloc(dev, sizeof(*vtac), GFP_KERNEL);
- if (!vtac)
- return -ENOMEM;
-
- vtac->dev = dev;
-
- id = of_match_node(vtac_of_match, np);
- if (!id)
- return -ENOMEM;
-
- vtac->mode = id->data;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Invalid resource\n");
- return -ENOMEM;
- }
- vtac->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(vtac->regs))
- return PTR_ERR(vtac->regs);
-
-
- vtac->clk = devm_clk_get(dev, "vtac");
- if (IS_ERR(vtac->clk)) {
- DRM_ERROR("Cannot get vtac clock\n");
- return PTR_ERR(vtac->clk);
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- vtac->phy_regs = devm_ioremap_nocache(dev, res->start,
- resource_size(res));
- sti_vtac_tx_set_config(vtac);
- } else {
-
- sti_vtac_rx_set_config(vtac);
- }
-
- platform_set_drvdata(pdev, vtac);
- DRM_INFO("%s %s\n", __func__, dev_name(vtac->dev));
-
- return 0;
-}
-
-static int sti_vtac_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-struct platform_driver sti_vtac_driver = {
- .driver = {
- .name = "sti-vtac",
- .owner = THIS_MODULE,
- .of_match_table = vtac_of_match,
- },
- .probe = sti_vtac_probe,
- .remove = sti_vtac_remove,
-};
-
-MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index a8882bdd0f8b..2dcba1d3a122 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -17,7 +17,6 @@
#include "sti_vtg.h"
#define VTG_MODE_MASTER 0
-#define VTG_MODE_SLAVE_BY_EXT0 1
/* registers offset */
#define VTG_MODE 0x0000
@@ -132,7 +131,6 @@ struct sti_vtg_sync_params {
* @irq_status: store the IRQ status value
* @notifier_list: notifier callback
* @crtc: the CRTC for vblank event
- * @slave: slave vtg
* @link: List node to link the structure in lookup list
*/
struct sti_vtg {
@@ -144,7 +142,6 @@ struct sti_vtg {
u32 irq_status;
struct raw_notifier_head notifier_list;
struct drm_crtc *crtc;
- struct sti_vtg *slave;
struct list_head link;
};
@@ -166,10 +163,6 @@ struct sti_vtg *of_vtg_find(struct device_node *np)
static void vtg_reset(struct sti_vtg *vtg)
{
- /* reset slave and then master */
- if (vtg->slave)
- vtg_reset(vtg->slave);
-
writel(1, vtg->regs + VTG_DRST_AUTOC);
}
@@ -259,10 +252,6 @@ static void vtg_set_mode(struct sti_vtg *vtg,
{
unsigned int i;
- if (vtg->slave)
- vtg_set_mode(vtg->slave, VTG_MODE_SLAVE_BY_EXT0,
- vtg->sync_params, mode);
-
/* Set the number of clock cycles per line */
writel(mode->htotal, vtg->regs + VTG_CLKLN);
@@ -318,11 +307,7 @@ void sti_vtg_set_config(struct sti_vtg *vtg,
vtg_reset(vtg);
- /* enable irq for the vtg vblank synchro */
- if (vtg->slave)
- vtg_enable_irq(vtg->slave);
- else
- vtg_enable_irq(vtg);
+ vtg_enable_irq(vtg);
}
/**
@@ -365,18 +350,12 @@ u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x)
int sti_vtg_register_client(struct sti_vtg *vtg, struct notifier_block *nb,
struct drm_crtc *crtc)
{
- if (vtg->slave)
- return sti_vtg_register_client(vtg->slave, nb, crtc);
-
vtg->crtc = crtc;
return raw_notifier_chain_register(&vtg->notifier_list, nb);
}
int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb)
{
- if (vtg->slave)
- return sti_vtg_unregister_client(vtg->slave, nb);
-
return raw_notifier_chain_unregister(&vtg->notifier_list, nb);
}
@@ -410,7 +389,6 @@ static irqreturn_t vtg_irq(int irq, void *arg)
static int vtg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np;
struct sti_vtg *vtg;
struct resource *res;
int ret;
@@ -429,30 +407,25 @@ static int vtg_probe(struct platform_device *pdev)
return -ENOMEM;
}
vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!vtg->regs) {
+ DRM_ERROR("failed to remap I/O memory\n");
+ return -ENOMEM;
+ }
- np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
- if (np) {
- vtg->slave = of_vtg_find(np);
- of_node_put(np);
+ vtg->irq = platform_get_irq(pdev, 0);
+ if (vtg->irq < 0) {
+ DRM_ERROR("Failed to get VTG interrupt\n");
+ return vtg->irq;
+ }
- if (!vtg->slave)
- return -EPROBE_DEFER;
- } else {
- vtg->irq = platform_get_irq(pdev, 0);
- if (vtg->irq < 0) {
- DRM_ERROR("Failed to get VTG interrupt\n");
- return vtg->irq;
- }
-
- RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
-
- ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
- vtg_irq_thread, IRQF_ONESHOT,
- dev_name(dev), vtg);
- if (ret < 0) {
- DRM_ERROR("Failed to register VTG interrupt\n");
- return ret;
- }
+ RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
+
+ ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
+ vtg_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), vtg);
+ if (ret < 0) {
+ DRM_ERROR("Failed to register VTG interrupt\n");
+ return ret;
}
vtg_register(vtg);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 2e08f969bb64..08ce15070f80 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -189,10 +189,11 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
interlaced ? "on" : "off");
- ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val);
+ ret = sun4i_backend_drm_format_to_layer(plane, fb->format->format,
+ &val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
- return val;
+ return ret;
}
regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG1(layer),
@@ -218,7 +219,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
/* Compute the start of the displayed memory */
- bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ bpp = fb->format->cpp[0];
paddr = gem->paddr + fb->offsets[0];
paddr += (state->src_x >> 16) * bpp;
paddr += (state->src_y >> 16) * fb->pitches[0];
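
The sun4i hunks above track the core framebuffer rework in this merge: drivers now read the cached struct drm_format_info at fb->format instead of calling drm_format_plane_cpp()/drm_format_num_planes() on fb->pixel_format. A minimal sketch of the new scanout-address computation follows; compute_scanout_addr is an illustrative name, not a helper in the driver:

#include <linux/types.h>
#include <drm/drm_framebuffer.h>

/* Sketch: DMA address of pixel (src_x, src_y) in plane 0, using the
 * drm_format_info cached in fb->format (new in this series).
 */
static dma_addr_t compute_scanout_addr(struct drm_framebuffer *fb,
				       dma_addr_t base,
				       unsigned int src_x, unsigned int src_y)
{
	unsigned int cpp = fb->format->cpp[0];	/* bytes per pixel, plane 0 */

	return base + fb->offsets[0]
		    + src_y * fb->pitches[0]
		    + src_x * cpp;
}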
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 8b6ce619ad81..2c3beff8b53e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -40,9 +40,7 @@ struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
- return drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_crtc,
- drm->mode_config.num_connector);
+ return drm_fbdev_cma_init(drm, 32, drm->mode_config.num_connector);
}
void sun4i_framebuffer_free(struct drm_device *drm)
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index f5e86fe7750e..757208f51731 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -208,6 +208,7 @@ int sun4i_rgb_init(struct drm_device *drm)
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tcon *tcon = drv->tcon;
struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
struct sun4i_rgb *rgb;
int ret;
@@ -218,8 +219,8 @@ int sun4i_rgb_init(struct drm_device *drm)
encoder = &rgb->encoder;
tcon->panel = sun4i_tcon_find_panel(tcon->dev->of_node);
- encoder->bridge = sun4i_tcon_find_bridge(tcon->dev->of_node);
- if (IS_ERR(tcon->panel) && IS_ERR(encoder->bridge)) {
+ bridge = sun4i_tcon_find_bridge(tcon->dev->of_node);
+ if (IS_ERR(tcon->panel) && IS_ERR(bridge)) {
dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
return 0;
}
@@ -260,16 +261,12 @@ int sun4i_rgb_init(struct drm_device *drm)
}
}
- if (!IS_ERR(encoder->bridge)) {
- encoder->bridge->encoder = &rgb->encoder;
-
- ret = drm_bridge_attach(drm, encoder->bridge);
+ if (!IS_ERR(bridge)) {
+ ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
}
- } else {
- encoder->bridge = NULL;
}
return 0;
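
The drm_bridge_attach() change visible here (and again in the tilcdc hunk further down) is part of the same core rework: the helper now takes the encoder instead of the drm_device and links encoder->bridge itself. A hedged before/after sketch, assuming encoder and bridge have already been looked up:

/* Old (up to v4.10): callers wired the pointers by hand. */
encoder->bridge = bridge;
bridge->encoder = encoder;
ret = drm_bridge_attach(drm, bridge);

/* New: pass the encoder; the last argument is the previous bridge
 * in a chain, or NULL when attaching directly to the encoder.
 */
ret = drm_bridge_attach(encoder, bridge, NULL);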
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 4010d69cbd08..7561a95a54e3 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -511,7 +511,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
if (!state->crtc)
return 0;
- err = tegra_dc_format(state->fb->pixel_format, &plane_state->format,
+ err = tegra_dc_format(state->fb->format->format, &plane_state->format,
&plane_state->swap);
if (err < 0)
return err;
@@ -531,7 +531,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
* error out if the user tries to display a framebuffer with such a
* configuration.
*/
- if (drm_format_num_planes(state->fb->pixel_format) > 2) {
+ if (state->fb->format->num_planes > 2) {
if (state->fb->pitches[2] != state->fb->pitches[1]) {
DRM_ERROR("unsupported UV-plane configuration\n");
return -EINVAL;
@@ -568,7 +568,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.dst.y = plane->state->crtc_y;
window.dst.w = plane->state->crtc_w;
window.dst.h = plane->state->crtc_h;
- window.bits_per_pixel = fb->bits_per_pixel;
+ window.bits_per_pixel = fb->format->cpp[0] * 8;
window.bottom_up = tegra_fb_is_bottom_up(fb);
/* copy from state */
@@ -576,7 +576,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.format = state->format;
window.swap = state->swap;
- for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+ for (i = 0; i < fb->format->num_planes; i++) {
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
window.base[i] = bo->paddr + fb->offsets[i];
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index b8be3ee4d3b8..ef215fef63d6 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -214,7 +214,7 @@ free:
return err;
}
-static int tegra_drm_unload(struct drm_device *drm)
+static void tegra_drm_unload(struct drm_device *drm)
{
struct host1x_device *device = to_host1x_device(drm->dev);
struct tegra_drm *tegra = drm->dev_private;
@@ -227,7 +227,7 @@ static int tegra_drm_unload(struct drm_device *drm)
err = host1x_device_exit(device);
if (err < 0)
- return err;
+ return;
if (tegra->domain) {
iommu_domain_free(tegra->domain);
@@ -235,8 +235,6 @@ static int tegra_drm_unload(struct drm_device *drm)
}
kfree(tegra);
-
- return 0;
}
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
@@ -806,23 +804,10 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
- unsigned int pipe)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
- if (pipe == drm_crtc_index(crtc))
- return crtc;
- }
-
- return NULL;
-}
-
static u32 tegra_drm_get_vblank_counter(struct drm_device *drm,
unsigned int pipe)
{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe);
struct tegra_dc *dc = to_tegra_dc(crtc);
if (!crtc)
@@ -833,7 +818,7 @@ static u32 tegra_drm_get_vblank_counter(struct drm_device *drm,
static int tegra_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe);
struct tegra_dc *dc = to_tegra_dc(crtc);
if (!crtc)
@@ -846,7 +831,7 @@ static int tegra_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
static void tegra_drm_disable_vblank(struct drm_device *drm, unsigned int pipe)
{
- struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+ struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe);
struct tegra_dc *dc = to_tegra_dc(crtc);
if (crtc)
@@ -875,8 +860,9 @@ static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
- fb->base.id, fb->width, fb->height, fb->depth,
- fb->bits_per_pixel,
+ fb->base.id, fb->width, fb->height,
+ fb->format->depth,
+ fb->format->cpp[0] * 8,
drm_framebuffer_read_refcount(fb));
}
@@ -890,8 +876,11 @@ static int tegra_debugfs_iova(struct seq_file *s, void *data)
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct drm_device *drm = node->minor->dev;
struct tegra_drm *tegra = drm->dev_private;
+ struct drm_printer p = drm_seq_file_printer(s);
+
+ drm_mm_print(&tegra->mm, &p);
- return drm_mm_dump_table(s, &tegra->mm);
+ return 0;
}
static struct drm_info_list tegra_debugfs_list[] = {
@@ -905,12 +894,6 @@ static int tegra_debugfs_init(struct drm_minor *minor)
ARRAY_SIZE(tegra_debugfs_list),
minor->debugfs_root, minor);
}
-
-static void tegra_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list), minor);
-}
#endif
static struct drm_driver tegra_drm_driver = {
@@ -928,7 +911,6 @@ static struct drm_driver tegra_drm_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = tegra_debugfs_init,
- .debugfs_cleanup = tegra_debugfs_cleanup,
#endif
.gem_free_object_unlocked = tegra_bo_free_object,
@@ -991,10 +973,6 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (err < 0)
goto unref;
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
- driver->major, driver->minor, driver->patchlevel,
- driver->date, drm->primary->index);
-
return 0;
unref:
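
The open-coded tegra_crtc_from_pipe() lookup is replaced by drm_crtc_from_index(), one of the new core helpers called out in the pull summary. A sketch of the pattern; the example_* names are illustrative:

#include <drm/drm_crtc.h>

static int example_enable_vblank(struct drm_device *drm, unsigned int pipe)
{
	/* walk over the registered CRTCs, now done by the core helper */
	struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe);

	if (!crtc)
		return -ENODEV;	/* no CRTC registered at this index */

	/* driver-specific vblank enable would go here */
	return 0;
}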
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 0ddcce1b420d..5205790dd679 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -17,6 +17,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e4a5ab0a9677..f142f6a4db25 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -32,7 +32,7 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
- if (index >= drm_format_num_planes(framebuffer->pixel_format))
+ if (index >= framebuffer->format->num_planes)
return NULL;
return fb->planes[index];
@@ -114,7 +114,7 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
fb->num_planes = num_planes;
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
for (i = 0; i < fb->num_planes; i++)
fb->planes[i] = planes[i];
@@ -246,7 +246,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &tegra_fb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
offset = info->var.xoffset * bytes_per_pixel +
@@ -271,8 +271,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
return 0;
destroy:
- drm_framebuffer_unregister_private(fb);
- tegra_fb_destroy(fb);
+ drm_framebuffer_remove(fb);
release:
drm_fb_helper_release_fbi(helper);
return err;
@@ -310,7 +309,7 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
struct drm_device *drm = fbdev->base.dev;
int err;
- err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
+ err = drm_fb_helper_init(drm, &fbdev->base, max_connectors);
if (err < 0) {
dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
err);
@@ -342,10 +341,8 @@ static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
drm_fb_helper_unregister_fbi(&fbdev->base);
drm_fb_helper_release_fbi(&fbdev->base);
- if (fbdev->fb) {
- drm_framebuffer_unregister_private(&fbdev->fb->base);
+ if (fbdev->fb)
drm_framebuffer_remove(&fbdev->fb->base);
- }
drm_fb_helper_fini(&fbdev->base);
tegra_fbdev_free(fbdev);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 7d853e6b5ff0..b523a5d4a38c 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -128,8 +128,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
if (!bo->mm)
return -ENOMEM;
- err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
- PAGE_SIZE, 0, 0, 0);
+ err = drm_mm_insert_node_generic(&tegra->mm,
+ bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
if (err < 0) {
dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
err);
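
The trailing zero dropped from drm_mm_insert_node_generic() reflects the drm_mm rework: the separate search and allocation flags were folded into a single insertion-mode argument. A hedged sketch (example_iova_alloc is illustrative):

#include <drm/drm_mm.h>

/* Sketch: reserve IOVA space for a buffer with the reworked drm_mm.
 * The final argument is now one insertion mode; 0 selects the default
 * best-fit strategy, replacing the old search/alloc flag pair.
 */
static int example_iova_alloc(struct drm_mm *mm, struct drm_mm_node *node,
			      u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, PAGE_SIZE, 0, 0);
}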
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 6dfdb145f3bb..f80bf9385e41 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -91,7 +91,7 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
start = gem->paddr + fb->offsets[0] +
crtc->y * fb->pitches[0] +
- crtc->x * drm_format_plane_cpp(fb->pixel_format, 0);
+ crtc->x * fb->format->cpp[0];
end = start + (crtc->mode.vdisplay * fb->pitches[0]);
@@ -399,7 +399,7 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
if (info->tft_alt_mode)
reg |= LCDC_TFT_ALT_ENABLE;
if (priv->rev == 2) {
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_BGR565:
case DRM_FORMAT_RGB565:
break;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index bd0a3bd07167..372d86fbb093 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -403,8 +403,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
drm_mode_config_reset(ddev);
priv->fbdev = drm_fbdev_cma_init(ddev, bpp,
- ddev->mode_config.num_crtc,
- ddev->mode_config.num_connector);
+ ddev->mode_config.num_connector);
if (IS_ERR(priv->fbdev)) {
ret = PTR_ERR(priv->fbdev);
goto init_failed;
@@ -507,7 +506,9 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+ struct drm_printer p = drm_seq_file_printer(m);
+ drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
+ return 0;
}
static struct drm_info_list tilcdc_debugfs_list[] = {
@@ -537,17 +538,6 @@ static int tilcdc_debugfs_init(struct drm_minor *minor)
return ret;
}
-
-static void tilcdc_debugfs_cleanup(struct drm_minor *minor)
-{
- struct tilcdc_module *mod;
- drm_debugfs_remove_files(tilcdc_debugfs_list,
- ARRAY_SIZE(tilcdc_debugfs_list), minor);
-
- list_for_each_entry(mod, &module_list, list)
- if (mod->funcs->debugfs_cleanup)
- mod->funcs->debugfs_cleanup(mod, minor);
-}
#endif
static const struct file_operations fops = {
@@ -587,7 +577,6 @@ static struct drm_driver tilcdc_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
- .debugfs_cleanup = tilcdc_debugfs_cleanup,
#endif
.fops = &fops,
.name = "tilcdc",
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 0e71daf5b5cb..8caa11bc7aec 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -111,8 +111,6 @@ struct tilcdc_module_ops {
#ifdef CONFIG_DEBUG_FS
/* create debugfs nodes (can be NULL): */
int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
- /* cleanup debugfs nodes (can be NULL): */
- void (*debugfs_cleanup)(struct tilcdc_module *mod, struct drm_minor *minor);
#endif
};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index c67d7cd7d57e..b0dd5e8634ae 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -167,10 +167,8 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
int ret;
priv->external_encoder->possible_crtcs = BIT(0);
- priv->external_encoder->bridge = bridge;
- bridge->encoder = priv->external_encoder;
- ret = drm_bridge_attach(ddev, bridge);
+ ret = drm_bridge_attach(priv->external_encoder, bridge, NULL);
if (ret) {
dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 8a6a50d74aff..ba0d66c0d8ac 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -69,7 +69,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
}
pitch = crtc_state->mode.hdisplay *
- drm_format_plane_cpp(state->fb->pixel_format, 0);
+ state->fb->format->cpp[0];
if (state->fb->pitches[0] != pitch) {
dev_err(plane->dev->dev,
"Invalid pitch: fb and crtc widths must be the same");
@@ -77,7 +77,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
}
if (state->fb && old_state->fb &&
- state->fb->pixel_format != old_state->fb->pixel_format) {
+ state->fb->format != old_state->fb->format) {
dev_dbg(plane->dev->dev,
"%s(): pixel format change requires mode_change\n",
__func__);
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
new file mode 100644
index 000000000000..3504c53846da
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -0,0 +1,21 @@
+menuconfig DRM_TINYDRM
+ tristate "Support for simple displays"
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ help
+	  Choose this option if you have a tinydrm-supported display.
+ If M is selected the module will be called tinydrm.
+
+config TINYDRM_MIPI_DBI
+ tristate
+
+config TINYDRM_MI0283QT
+ tristate "DRM support for MI0283QT"
+ depends on DRM_TINYDRM && SPI
+ select TINYDRM_MIPI_DBI
+ help
+	  DRM driver for the Multi-Inno MI0283QT display panel.
+ If M is selected the module will be called mi0283qt.
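
As a usage note: TINYDRM_MIPI_DBI has no prompt, so it can only be enabled via select from a panel driver. Building the new stack as modules would leave roughly these lines in a kernel .config (a sketch, not a tested configuration):

CONFIG_DRM_TINYDRM=m
CONFIG_TINYDRM_MIPI_DBI=m
CONFIG_TINYDRM_MI0283QT=m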
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
new file mode 100644
index 000000000000..7a3604cf4fc2
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_DRM_TINYDRM) += core/
+
+# Controllers
+obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
+
+# Displays
+obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile
new file mode 100644
index 000000000000..fb221e6f8885
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/core/Makefile
@@ -0,0 +1,3 @@
+tinydrm-y := tinydrm-core.o tinydrm-pipe.o tinydrm-helpers.o
+
+obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
new file mode 100644
index 000000000000..6a257dd08ee0
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/tinydrm/tinydrm.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides driver helpers for very simple display hardware.
+ *
+ * It is based on &drm_simple_display_pipe coupled with a &drm_connector which
+ * has only one fixed &drm_display_mode. The framebuffers are backed by the
+ * cma helper and have support for framebuffer flushing (dirty).
+ * fbdev support is also included.
+ *
+ */
+
+/**
+ * DOC: core
+ *
+ * The driver allocates &tinydrm_device, initializes it using
+ * devm_tinydrm_init(), sets up the pipeline using tinydrm_display_pipe_init()
+ * and registers the DRM device using devm_tinydrm_register().
+ */
+
+/**
+ * tinydrm_lastclose - DRM lastclose helper
+ * @drm: DRM device
+ *
+ * This function ensures that fbdev is restored when drm_lastclose() is called
+ * on the last drm_release(). Drivers can use this as their
+ * &drm_driver->lastclose callback.
+ */
+void tinydrm_lastclose(struct drm_device *drm)
+{
+ struct tinydrm_device *tdev = drm->dev_private;
+
+ DRM_DEBUG_KMS("\n");
+ drm_fbdev_cma_restore_mode(tdev->fbdev_cma);
+}
+EXPORT_SYMBOL(tinydrm_lastclose);
+
+/**
+ * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from
+ * another driver's scatter/gather table of pinned pages
+ * @drm: DRM device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver using drm_gem_cma_prime_import_sg_table(). It sets the
+ * kernel virtual address on the CMA object. Drivers should use this as their
+ * &drm_driver->gem_prime_import_sg_table callback if they need the virtual
+ * address. tinydrm_gem_cma_free_object() should be used in combination with
+ * this function.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *obj;
+ void *vaddr;
+
+ vaddr = dma_buf_vmap(attach->dmabuf);
+ if (!vaddr) {
+ DRM_ERROR("Failed to vmap PRIME buffer\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt);
+ if (IS_ERR(obj)) {
+ dma_buf_vunmap(attach->dmabuf, vaddr);
+ return obj;
+ }
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+ cma_obj->vaddr = vaddr;
+
+ return obj;
+}
+EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
+
+/**
+ * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM
+ * object
+ * @gem_obj: GEM object to free
+ *
+ * This function frees the backing memory of the CMA GEM object, cleans up the
+ * GEM object state and frees the memory used to store the object itself using
+ * drm_gem_cma_free_object(). It also handles PRIME buffers which have the kernel
+ * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
+ * can use this as their &drm_driver->gem_free_object callback.
+ */
+void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+ if (gem_obj->import_attach) {
+ struct drm_gem_cma_object *cma_obj;
+
+ cma_obj = to_drm_gem_cma_obj(gem_obj);
+ dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
+ cma_obj->vaddr = NULL;
+ }
+
+ drm_gem_cma_free_object(gem_obj);
+}
+EXPORT_SYMBOL_GPL(tinydrm_gem_cma_free_object);
+
+const struct file_operations tinydrm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+EXPORT_SYMBOL(tinydrm_fops);
+
+static struct drm_framebuffer *
+tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct tinydrm_device *tdev = drm->dev_private;
+
+ return drm_fb_cma_create_with_funcs(drm, file_priv, mode_cmd,
+ tdev->fb_funcs);
+}
+
+static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = {
+ .fb_create = tinydrm_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
+ const struct drm_framebuffer_funcs *fb_funcs,
+ struct drm_driver *driver)
+{
+ struct drm_device *drm;
+
+ mutex_init(&tdev->dirty_lock);
+ tdev->fb_funcs = fb_funcs;
+
+ /*
+	 * We don't embed drm_device, because that prevents us from using
+ * devm_kzalloc() to allocate tinydrm_device in the driver since
+ * drm_dev_unref() frees the structure. The devm_ functions provide
+ * for easy error handling.
+ */
+ drm = drm_dev_alloc(driver, parent);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ tdev->drm = drm;
+ drm->dev_private = tdev;
+ drm_mode_config_init(drm);
+ drm->mode_config.funcs = &tinydrm_mode_config_funcs;
+
+ return 0;
+}
+
+static void tinydrm_fini(struct tinydrm_device *tdev)
+{
+ drm_mode_config_cleanup(tdev->drm);
+ mutex_destroy(&tdev->dirty_lock);
+ tdev->drm->dev_private = NULL;
+ drm_dev_unref(tdev->drm);
+}
+
+static void devm_tinydrm_release(void *data)
+{
+ tinydrm_fini(data);
+}
+
+/**
+ * devm_tinydrm_init - Initialize tinydrm device
+ * @parent: Parent device object
+ * @tdev: tinydrm device
+ * @fb_funcs: Framebuffer functions
+ * @driver: DRM driver
+ *
+ * This function initializes @tdev, the underlying DRM device and its
+ * mode_config. Resources will be automatically freed on driver detach (devres)
+ * using drm_mode_config_cleanup() and drm_dev_unref().
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
+ const struct drm_framebuffer_funcs *fb_funcs,
+ struct drm_driver *driver)
+{
+ int ret;
+
+ ret = tinydrm_init(parent, tdev, fb_funcs, driver);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action(parent, devm_tinydrm_release, tdev);
+ if (ret)
+ tinydrm_fini(tdev);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_tinydrm_init);
+
+static int tinydrm_register(struct tinydrm_device *tdev)
+{
+ struct drm_device *drm = tdev->drm;
+ int bpp = drm->mode_config.preferred_depth;
+ struct drm_fbdev_cma *fbdev;
+ int ret;
+
+ ret = drm_dev_register(tdev->drm, 0);
+ if (ret)
+ return ret;
+
+ fbdev = drm_fbdev_cma_init_with_funcs(drm, bpp ? bpp : 32,
+ drm->mode_config.num_connector,
+ tdev->fb_funcs);
+ if (IS_ERR(fbdev))
+ DRM_ERROR("Failed to initialize fbdev: %ld\n", PTR_ERR(fbdev));
+ else
+ tdev->fbdev_cma = fbdev;
+
+ return 0;
+}
+
+static void tinydrm_unregister(struct tinydrm_device *tdev)
+{
+ struct drm_fbdev_cma *fbdev_cma = tdev->fbdev_cma;
+
+ drm_crtc_force_disable_all(tdev->drm);
+ /* don't restore fbdev in lastclose, keep pipeline disabled */
+ tdev->fbdev_cma = NULL;
+ drm_dev_unregister(tdev->drm);
+ if (fbdev_cma)
+ drm_fbdev_cma_fini(fbdev_cma);
+}
+
+static void devm_tinydrm_register_release(void *data)
+{
+ tinydrm_unregister(data);
+}
+
+/**
+ * devm_tinydrm_register - Register tinydrm device
+ * @tdev: tinydrm device
+ *
+ * This function registers the underlying DRM device and fbdev.
+ * These resources will be automatically unregistered on driver detach (devres)
+ * and the display pipeline will be disabled.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int devm_tinydrm_register(struct tinydrm_device *tdev)
+{
+ struct device *dev = tdev->drm->dev;
+ int ret;
+
+ ret = tinydrm_register(tdev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action(dev, devm_tinydrm_register_release, tdev);
+ if (ret)
+ tinydrm_unregister(tdev);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_tinydrm_register);
+
+/**
+ * tinydrm_shutdown - Shutdown tinydrm
+ * @tdev: tinydrm device
+ *
+ * This function makes sure that the display pipeline is disabled.
+ * Used by drivers in their shutdown callback to turn off the display
+ * on machine shutdown and reboot.
+ */
+void tinydrm_shutdown(struct tinydrm_device *tdev)
+{
+ drm_crtc_force_disable_all(tdev->drm);
+}
+EXPORT_SYMBOL(tinydrm_shutdown);
+
+/**
+ * tinydrm_suspend - Suspend tinydrm
+ * @tdev: tinydrm device
+ *
+ * Used in driver PM operations to suspend tinydrm.
+ * Suspends fbdev and DRM.
+ * Resume with tinydrm_resume().
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int tinydrm_suspend(struct tinydrm_device *tdev)
+{
+ struct drm_atomic_state *state;
+
+ if (tdev->suspend_state) {
+ DRM_ERROR("Failed to suspend: state already set\n");
+ return -EINVAL;
+ }
+
+ drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 1);
+ state = drm_atomic_helper_suspend(tdev->drm);
+ if (IS_ERR(state)) {
+ drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);
+ return PTR_ERR(state);
+ }
+
+ tdev->suspend_state = state;
+
+ return 0;
+}
+EXPORT_SYMBOL(tinydrm_suspend);
+
+/**
+ * tinydrm_resume - Resume tinydrm
+ * @tdev: tinydrm device
+ *
+ * Used in driver PM operations to resume tinydrm.
+ * Suspend with tinydrm_suspend().
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int tinydrm_resume(struct tinydrm_device *tdev)
+{
+ struct drm_atomic_state *state = tdev->suspend_state;
+ int ret;
+
+ if (!state) {
+ DRM_ERROR("Failed to resume: state is not set\n");
+ return -EINVAL;
+ }
+
+ tdev->suspend_state = NULL;
+
+ ret = drm_atomic_helper_resume(tdev->drm, state);
+ if (ret) {
+ DRM_ERROR("Error resuming state: %d\n", ret);
+ return ret;
+ }
+
+ drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(tinydrm_resume);
+
+MODULE_LICENSE("GPL");
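
Tying the DOC: core flow together, a probe function for a hypothetical tinydrm driver would look roughly like the sketch below; every my_* identifier is illustrative, and error handling is reduced to the devres pattern the library is built around:

/* Sketch: allocate, init, set up the pipe, register (all via devres). */
static int my_probe(struct device *dev)
{
	struct tinydrm_device *tdev;
	int ret;

	tdev = devm_kzalloc(dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	ret = devm_tinydrm_init(dev, tdev, &my_fb_funcs, &my_driver);
	if (ret)
		return ret;

	ret = tinydrm_display_pipe_init(tdev, &my_pipe_funcs,
					DRM_MODE_CONNECTOR_VIRTUAL,
					my_formats, ARRAY_SIZE(my_formats),
					&my_mode, 0);
	if (ret)
		return ret;

	drm_mode_config_reset(tdev->drm);

	return devm_tinydrm_register(tdev);
}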
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
new file mode 100644
index 000000000000..3ccda6c1e159
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/tinydrm/tinydrm.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <linux/backlight.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/swab.h>
+
+static unsigned int spi_max;
+module_param(spi_max, uint, 0400);
+MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size");
+
+/**
+ * tinydrm_merge_clips - Merge clip rectangles
+ * @dst: Destination clip rectangle
+ * @src: Source clip rectangle(s)
+ * @num_clips: Number of @src clip rectangles
+ * @flags: Dirty fb ioctl flags
+ * @max_width: Maximum width of @dst
+ * @max_height: Maximum height of @dst
+ *
+ * This function merges @src clip rectangle(s) into @dst. If @src is NULL,
+ * @max_width and @max_height are used to set a full @dst clip rectangle.
+ *
+ * Returns:
+ * true if it's a full clip, false otherwise
+ */
+bool tinydrm_merge_clips(struct drm_clip_rect *dst,
+ struct drm_clip_rect *src, unsigned int num_clips,
+ unsigned int flags, u32 max_width, u32 max_height)
+{
+ unsigned int i;
+
+ if (!src || !num_clips) {
+ dst->x1 = 0;
+ dst->x2 = max_width;
+ dst->y1 = 0;
+ dst->y2 = max_height;
+ return true;
+ }
+
+ dst->x1 = ~0;
+ dst->y1 = ~0;
+ dst->x2 = 0;
+ dst->y2 = 0;
+
+ for (i = 0; i < num_clips; i++) {
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY)
+ i++;
+ dst->x1 = min(dst->x1, src[i].x1);
+ dst->x2 = max(dst->x2, src[i].x2);
+ dst->y1 = min(dst->y1, src[i].y1);
+ dst->y2 = max(dst->y2, src[i].y2);
+ }
+
+ if (dst->x2 > max_width || dst->y2 > max_height ||
+ dst->x1 >= dst->x2 || dst->y1 >= dst->y2) {
+ DRM_DEBUG_KMS("Illegal clip: x1=%u, x2=%u, y1=%u, y2=%u\n",
+ dst->x1, dst->x2, dst->y1, dst->y2);
+ dst->x1 = 0;
+ dst->y1 = 0;
+ dst->x2 = max_width;
+ dst->y2 = max_height;
+ }
+
+ return (dst->x2 - dst->x1) == max_width &&
+ (dst->y2 - dst->y1) == max_height;
+}
+EXPORT_SYMBOL(tinydrm_merge_clips);
+
+/**
+ * tinydrm_memcpy - Copy clip buffer
+ * @dst: Destination buffer
+ * @vaddr: Source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ */
+void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip)
+{
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ unsigned int pitch = fb->pitches[0];
+ void *src = vaddr + (clip->y1 * pitch) + (clip->x1 * cpp);
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ memcpy(dst, src, len);
+ src += pitch;
+ dst += len;
+ }
+}
+EXPORT_SYMBOL(tinydrm_memcpy);
+
+/**
+ * tinydrm_swab16 - Swap bytes into clip buffer
+ * @dst: RGB565 destination buffer
+ * @vaddr: RGB565 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ */
+void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip)
+{
+ size_t len = (clip->x2 - clip->x1) * sizeof(u16);
+ unsigned int x, y;
+ u16 *src, *buf;
+
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++)
+ *dst++ = swab16(*src++);
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(tinydrm_swab16);
+
+/**
+ * tinydrm_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
+ * @dst: RGB565 destination buffer
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @swap: Swap bytes
+ *
+ * Drivers can use this function for RGB565 devices that don't natively
+ * support XRGB8888.
+ */
+void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip, bool swap)
+{
+ size_t len = (clip->x2 - clip->x1) * sizeof(u32);
+ unsigned int x, y;
+ u32 *src, *buf;
+ u16 val16;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++) {
+ val16 = ((*src & 0x00F80000) >> 8) |
+ ((*src & 0x0000FC00) >> 5) |
+ ((*src & 0x000000F8) >> 3);
+ src++;
+ if (swap)
+ *dst++ = swab16(val16);
+ else
+ *dst++ = val16;
+ }
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
+
+/**
+ * tinydrm_of_find_backlight - Find backlight device in device-tree
+ * @dev: Device
+ *
+ * This function looks for a DT node pointed to by a property named 'backlight'
+ * and uses of_find_backlight_by_node() to get the backlight device.
+ * Additionally, if the brightness property is zero, it is set to
+ * max_brightness.
+ *
+ * Returns:
+ * NULL if there's no backlight property.
+ * Error pointer -EPROBE_DEFER if the DT node is found, but no backlight device
+ * is found.
+ * If the backlight device is found, a pointer to the structure is returned.
+ */
+struct backlight_device *tinydrm_of_find_backlight(struct device *dev)
+{
+ struct backlight_device *backlight;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "backlight", 0);
+ if (!np)
+ return NULL;
+
+ backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!backlight)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!backlight->props.brightness) {
+ backlight->props.brightness = backlight->props.max_brightness;
+ DRM_DEBUG_KMS("Backlight brightness set to %d\n",
+ backlight->props.brightness);
+ }
+
+ return backlight;
+}
+EXPORT_SYMBOL(tinydrm_of_find_backlight);
+
+/**
+ * tinydrm_enable_backlight - Enable backlight helper
+ * @backlight: Backlight device
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int tinydrm_enable_backlight(struct backlight_device *backlight)
+{
+ unsigned int old_state;
+ int ret;
+
+ if (!backlight)
+ return 0;
+
+ old_state = backlight->props.state;
+ backlight->props.state &= ~BL_CORE_FBBLANK;
+ DRM_DEBUG_KMS("Backlight state: 0x%x -> 0x%x\n", old_state,
+ backlight->props.state);
+
+ ret = backlight_update_status(backlight);
+ if (ret)
+ DRM_ERROR("Failed to enable backlight %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(tinydrm_enable_backlight);
+
+/**
+ * tinydrm_disable_backlight - Disable backlight helper
+ * @backlight: Backlight device
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int tinydrm_disable_backlight(struct backlight_device *backlight)
+{
+ unsigned int old_state;
+ int ret;
+
+ if (!backlight)
+ return 0;
+
+ old_state = backlight->props.state;
+ backlight->props.state |= BL_CORE_FBBLANK;
+ DRM_DEBUG_KMS("Backlight state: 0x%x -> 0x%x\n", old_state,
+ backlight->props.state);
+ ret = backlight_update_status(backlight);
+ if (ret)
+ DRM_ERROR("Failed to disable backlight %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(tinydrm_disable_backlight);
+
+#if IS_ENABLED(CONFIG_SPI)
+
+/**
+ * tinydrm_spi_max_transfer_size - Determine max SPI transfer size
+ * @spi: SPI device
+ * @max_len: Maximum buffer size needed (optional)
+ *
+ * This function returns the maximum size to use for SPI transfers. It checks
+ * the SPI master, the optional @max_len and the module parameter spi_max and
+ * returns the smallest.
+ *
+ * Returns:
+ * Maximum size for SPI transfers
+ */
+size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len)
+{
+ size_t ret;
+
+ ret = min(spi_max_transfer_size(spi), spi->master->max_dma_len);
+ if (max_len)
+ ret = min(ret, max_len);
+ if (spi_max)
+ ret = min_t(size_t, ret, spi_max);
+ ret &= ~0x3;
+ if (ret < 4)
+ ret = 4;
+
+ return ret;
+}
+EXPORT_SYMBOL(tinydrm_spi_max_transfer_size);
+
+/**
+ * tinydrm_spi_bpw_supported - Check if bits per word is supported
+ * @spi: SPI device
+ * @bpw: Bits per word
+ *
+ * This function checks to see if the SPI master driver supports @bpw.
+ *
+ * Returns:
+ * True if @bpw is supported, false otherwise.
+ */
+bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw)
+{
+ u32 bpw_mask = spi->master->bits_per_word_mask;
+
+ if (bpw == 8)
+ return true;
+
+ if (!bpw_mask) {
+ dev_warn_once(&spi->dev,
+ "bits_per_word_mask not set, assume 8-bit only\n");
+ return false;
+ }
+
+ if (bpw_mask & SPI_BPW_MASK(bpw))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(tinydrm_spi_bpw_supported);
+
+static void
+tinydrm_dbg_spi_print(struct spi_device *spi, struct spi_transfer *tr,
+ const void *buf, int idx, bool tx)
+{
+ u32 speed_hz = tr->speed_hz ? tr->speed_hz : spi->max_speed_hz;
+ char linebuf[3 * 32];
+
+ hex_dump_to_buffer(buf, tr->len, 16,
+ DIV_ROUND_UP(tr->bits_per_word, 8),
+ linebuf, sizeof(linebuf), false);
+
+ printk(KERN_DEBUG
+ " tr(%i): speed=%u%s, bpw=%i, len=%u, %s_buf=[%s%s]\n", idx,
+ speed_hz > 1000000 ? speed_hz / 1000000 : speed_hz / 1000,
+ speed_hz > 1000000 ? "MHz" : "kHz", tr->bits_per_word, tr->len,
+ tx ? "tx" : "rx", linebuf, tr->len > 16 ? " ..." : "");
+}
+
+/* called through tinydrm_dbg_spi_message() */
+void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_transfer *tmp;
+ struct list_head *pos;
+ int i = 0;
+
+ list_for_each(pos, &m->transfers) {
+ tmp = list_entry(pos, struct spi_transfer, transfer_list);
+
+ if (tmp->tx_buf)
+ tinydrm_dbg_spi_print(spi, tmp, tmp->tx_buf, i, true);
+ if (tmp->rx_buf)
+ tinydrm_dbg_spi_print(spi, tmp, tmp->rx_buf, i, false);
+ i++;
+ }
+}
+EXPORT_SYMBOL(_tinydrm_dbg_spi_message);
+
+/**
+ * tinydrm_spi_transfer - SPI transfer helper
+ * @spi: SPI device
+ * @speed_hz: Override speed (optional)
+ * @header: Optional header transfer
+ * @bpw: Bits per word
+ * @buf: Buffer to transfer
+ * @len: Buffer length
+ *
+ * This SPI transfer helper breaks up the transfer of @buf into chunks which
+ * the SPI master driver can handle. If the machine is Little Endian and the
+ * SPI master driver doesn't support 16 bits per word, it swaps the bytes and
+ * does an 8-bit transfer.
+ * If @header is set, it is prepended to each SPI message.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
+ struct spi_transfer *header, u8 bpw, const void *buf,
+ size_t len)
+{
+ struct spi_transfer tr = {
+ .bits_per_word = bpw,
+ .speed_hz = speed_hz,
+ };
+ struct spi_message m;
+ u16 *swap_buf = NULL;
+ size_t max_chunk;
+ size_t chunk;
+ int ret = 0;
+
+ if (WARN_ON_ONCE(bpw != 8 && bpw != 16))
+ return -EINVAL;
+
+ max_chunk = tinydrm_spi_max_transfer_size(spi, 0);
+
+ if (drm_debug & DRM_UT_DRIVER)
+ pr_debug("[drm:%s] bpw=%u, max_chunk=%zu, transfers:\n",
+ __func__, bpw, max_chunk);
+
+ if (bpw == 16 && !tinydrm_spi_bpw_supported(spi, 16)) {
+ tr.bits_per_word = 8;
+ if (tinydrm_machine_little_endian()) {
+ swap_buf = kmalloc(min(len, max_chunk), GFP_KERNEL);
+ if (!swap_buf)
+ return -ENOMEM;
+ }
+ }
+
+ spi_message_init(&m);
+ if (header)
+ spi_message_add_tail(header, &m);
+ spi_message_add_tail(&tr, &m);
+
+ while (len) {
+ chunk = min(len, max_chunk);
+
+ tr.tx_buf = buf;
+ tr.len = chunk;
+
+ if (swap_buf) {
+ const u16 *buf16 = buf;
+ unsigned int i;
+
+ for (i = 0; i < chunk / 2; i++)
+ swap_buf[i] = swab16(buf16[i]);
+
+ tr.tx_buf = swap_buf;
+ }
+
+ buf += chunk;
+ len -= chunk;
+
+ tinydrm_dbg_spi_message(spi, &m);
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
+	}
+
+ return 0;
+}
+EXPORT_SYMBOL(tinydrm_spi_transfer);
+
+#endif /* CONFIG_SPI */
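
These helpers are meant to be composed in a framebuffer flush path: merge the userspace clip rectangles first, then copy or convert only the damaged lines into the transfer buffer. A simplified sketch follows; it is not the real &drm_framebuffer_funcs->dirty signature, and my_flush_rect and tx_buf are illustrative:

/* Sketch: flush the damaged region of an XRGB8888 CMA framebuffer
 * to an RGB565 controller through a hypothetical tx_buf.
 */
static int my_fb_flush(struct drm_framebuffer *fb, void *vaddr, u16 *tx_buf,
		       struct drm_clip_rect *clips, unsigned int num_clips,
		       unsigned int flags)
{
	struct drm_clip_rect clip;

	/* Collapse all clips into one rectangle (full frame if none) */
	tinydrm_merge_clips(&clip, clips, num_clips, flags,
			    fb->width, fb->height);

	/* Convert only the damaged lines; swap bytes for an 8-bit bus */
	tinydrm_xrgb8888_to_rgb565(tx_buf, vaddr, fb, &clip, true);

	return my_flush_rect(tx_buf, &clip);	/* hypothetical transfer */
}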
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
new file mode 100644
index 000000000000..ec43fb7ad9e4
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/tinydrm/tinydrm.h>
+
+struct tinydrm_connector {
+ struct drm_connector base;
+ const struct drm_display_mode *mode;
+};
+
+static inline struct tinydrm_connector *
+to_tinydrm_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct tinydrm_connector, base);
+}
+
+static int tinydrm_connector_get_modes(struct drm_connector *connector)
+{
+ struct tinydrm_connector *tconn = to_tinydrm_connector(connector);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, tconn->mode);
+ if (!mode) {
+ DRM_ERROR("Failed to duplicate mode\n");
+ return 0;
+ }
+
+ if (mode->name[0] == '\0')
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ if (mode->width_mm) {
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ }
+
+ return 1;
+}
+
+static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
+ .get_modes = tinydrm_connector_get_modes,
+ .best_encoder = drm_atomic_helper_best_encoder,
+};
+
+static enum drm_connector_status
+tinydrm_connector_detect(struct drm_connector *connector, bool force)
+{
+ if (drm_device_is_unplugged(connector->dev))
+ return connector_status_disconnected;
+
+ return connector->status;
+}
+
+static void tinydrm_connector_destroy(struct drm_connector *connector)
+{
+ struct tinydrm_connector *tconn = to_tinydrm_connector(connector);
+
+ drm_connector_cleanup(connector);
+ kfree(tconn);
+}
+
+static const struct drm_connector_funcs tinydrm_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = tinydrm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tinydrm_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+struct drm_connector *
+tinydrm_connector_create(struct drm_device *drm,
+ const struct drm_display_mode *mode,
+ int connector_type)
+{
+ struct tinydrm_connector *tconn;
+ struct drm_connector *connector;
+ int ret;
+
+ tconn = kzalloc(sizeof(*tconn), GFP_KERNEL);
+ if (!tconn)
+ return ERR_PTR(-ENOMEM);
+
+ tconn->mode = mode;
+ connector = &tconn->base;
+
+ drm_connector_helper_add(connector, &tinydrm_connector_hfuncs);
+ ret = drm_connector_init(drm, connector, &tinydrm_connector_funcs,
+ connector_type);
+ if (ret) {
+ kfree(tconn);
+ return ERR_PTR(ret);
+ }
+
+ connector->status = connector_status_connected;
+
+ return connector;
+}
+
+/**
+ * tinydrm_display_pipe_update - Display pipe update helper
+ * @pipe: Simple display pipe
+ * @old_state: Old plane state
+ *
+ * This function does a full framebuffer flush if the plane framebuffer
+ * has changed. It also handles vblank events. Drivers can use this as their
+ * &drm_simple_display_pipe_funcs->update callback.
+ */
+void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct drm_framebuffer *fb = pipe->plane.state->fb;
+ struct drm_crtc *crtc = &tdev->pipe.crtc;
+
+ if (fb && (fb != old_state->fb)) {
+ pipe->plane.fb = fb;
+ if (fb->funcs->dirty)
+ fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+ }
+
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ crtc->state->event = NULL;
+ }
+}
+EXPORT_SYMBOL(tinydrm_display_pipe_update);
+
+/**
+ * tinydrm_display_pipe_prepare_fb - Display pipe prepare_fb helper
+ * @pipe: Simple display pipe
+ * @plane_state: Plane state
+ *
+ * This function uses drm_fb_cma_prepare_fb() to check if the plane FB has a
+ * dma-buf attached, extracts the exclusive fence and attaches it to plane
+ * state for the atomic helper to wait on. Drivers can use this as their
+ * &drm_simple_display_pipe_funcs->prepare_fb callback.
+ */
+int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(tinydrm_display_pipe_prepare_fb);
+
+static int tinydrm_rotate_mode(struct drm_display_mode *mode,
+ unsigned int rotation)
+{
+ if (rotation == 0 || rotation == 180) {
+ return 0;
+ } else if (rotation == 90 || rotation == 270) {
+ swap(mode->hdisplay, mode->vdisplay);
+ swap(mode->hsync_start, mode->vsync_start);
+ swap(mode->hsync_end, mode->vsync_end);
+ swap(mode->htotal, mode->vtotal);
+ swap(mode->width_mm, mode->height_mm);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+/**
+ * tinydrm_display_pipe_init - Initialize display pipe
+ * @tdev: tinydrm device
+ * @funcs: Display pipe functions
+ * @connector_type: Connector type
+ * @formats: Array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: Number of elements in @formats
+ * @mode: Supported mode
+ * @rotation: Initial @mode rotation in degrees, counter-clockwise
+ *
+ * This function sets up a &drm_simple_display_pipe with a &drm_connector that
+ * has one fixed &drm_display_mode which is rotated according to @rotation.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int
+tinydrm_display_pipe_init(struct tinydrm_device *tdev,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ int connector_type,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation)
+{
+ struct drm_device *drm = tdev->drm;
+ struct drm_display_mode *mode_copy;
+ struct drm_connector *connector;
+ int ret;
+
+ mode_copy = devm_kmalloc(drm->dev, sizeof(*mode_copy), GFP_KERNEL);
+ if (!mode_copy)
+ return -ENOMEM;
+
+ *mode_copy = *mode;
+ ret = tinydrm_rotate_mode(mode_copy, rotation);
+ if (ret) {
+ DRM_ERROR("Illegal rotation value %u\n", rotation);
+ return -EINVAL;
+ }
+
+ drm->mode_config.min_width = mode_copy->hdisplay;
+ drm->mode_config.max_width = mode_copy->hdisplay;
+ drm->mode_config.min_height = mode_copy->vdisplay;
+ drm->mode_config.max_height = mode_copy->vdisplay;
+
+ connector = tinydrm_connector_create(drm, mode_copy, connector_type);
+ if (IS_ERR(connector))
+ return PTR_ERR(connector);
+
+ ret = drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
+ format_count, connector);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(tinydrm_display_pipe_init);
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
new file mode 100644
index 000000000000..b29fe86158f7
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -0,0 +1,279 @@
+/*
+ * DRM driver for Multi-Inno MI0283QT panels
+ *
+ * Copyright 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/tinydrm/ili9341.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+static int mi0283qt_init(struct mipi_dbi *mipi)
+{
+ struct tinydrm_device *tdev = &mipi->tinydrm;
+ struct device *dev = tdev->drm->dev;
+ u8 addr_mode;
+ int ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = regulator_enable(mipi->regulator);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator %d\n", ret);
+ return ret;
+ }
+
+ /* Avoid flicker by skipping setup if the bootloader has done it */
+ if (mipi_dbi_display_is_on(mipi))
+ return 0;
+
+ mipi_dbi_hw_reset(mipi);
+ ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
+ if (ret) {
+ dev_err(dev, "Error sending command %d\n", ret);
+ regulator_disable(mipi->regulator);
+ return ret;
+ }
+
+ msleep(20);
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+
+ mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0x83, 0x30);
+ mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+ mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x01, 0x79);
+ mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+ mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
+ mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+
+ /* Power Control */
+ mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x26);
+ mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x11);
+ /* VCOM */
+ mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x35, 0x3e);
+ mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0xbe);
+
+ /* Memory Access Control */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+
+ switch (mipi->rotation) {
+ default:
+ addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
+ ILI9341_MADCTL_MX;
+ break;
+ case 90:
+ addr_mode = ILI9341_MADCTL_MY;
+ break;
+ case 180:
+ addr_mode = ILI9341_MADCTL_MV;
+ break;
+ case 270:
+ addr_mode = ILI9341_MADCTL_MX;
+ break;
+ }
+ addr_mode |= ILI9341_MADCTL_BGR;
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+
+ /* Frame Rate */
+ mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+
+ /* Gamma */
+ mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x08);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+ mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+ 0x1f, 0x1a, 0x18, 0x0a, 0x0f, 0x06, 0x45, 0x87,
+ 0x32, 0x0a, 0x07, 0x02, 0x07, 0x05, 0x00);
+ mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+ 0x00, 0x25, 0x27, 0x05, 0x10, 0x09, 0x3a, 0x78,
+ 0x4d, 0x05, 0x18, 0x0d, 0x38, 0x3a, 0x1f);
+
+ /* DDRAM */
+ mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+
+ /* Display */
+ mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x0a, 0x82, 0x27, 0x00);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(100);
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+ return 0;
+}
+
+static void mi0283qt_fini(void *data)
+{
+ struct mipi_dbi *mipi = data;
+
+ DRM_DEBUG_KMS("\n");
+ regulator_disable(mipi->regulator);
+}
+
+static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
+ .enable = mipi_dbi_pipe_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = tinydrm_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode mi0283qt_mode = {
+ TINYDRM_MODE(320, 240, 58, 43),
+};
+
+static struct drm_driver mi0283qt_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ TINYDRM_GEM_DRIVER_OPS,
+ .lastclose = tinydrm_lastclose,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "mi0283qt",
+ .desc = "Multi-Inno MI0283QT",
+ .date = "20160614",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id mi0283qt_of_match[] = {
+ { .compatible = "multi-inno,mi0283qt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mi0283qt_of_match);
+
+static const struct spi_device_id mi0283qt_id[] = {
+ { "mi0283qt", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, mi0283qt_id);
+
+static int mi0283qt_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct tinydrm_device *tdev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ dev_err(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ dev_err(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ mipi->regulator = devm_regulator_get(dev, "power");
+ if (IS_ERR(mipi->regulator))
+ return PTR_ERR(mipi->regulator);
+
+ mipi->backlight = tinydrm_of_find_backlight(dev);
+ if (IS_ERR(mipi->backlight))
+ return PTR_ERR(mipi->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, dc, &mi0283qt_pipe_funcs,
+ &mi0283qt_driver, &mi0283qt_mode, rotation);
+ if (ret)
+ return ret;
+
+ ret = mi0283qt_init(mipi);
+ if (ret)
+ return ret;
+
+ /* Use devres to call fini after DRM unregister (driver remove runs first) */
+ ret = devm_add_action(dev, mi0283qt_fini, mipi);
+ if (ret) {
+ mi0283qt_fini(mipi);
+ return ret;
+ }
+
+ tdev = &mipi->tinydrm;
+
+ ret = devm_tinydrm_register(tdev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
+ tdev->drm->driver->name, dev_name(dev),
+ spi->max_speed_hz / 1000000,
+ tdev->drm->primary->index);
+
+ return 0;
+}
+
+static void mi0283qt_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+{
+ struct mipi_dbi *mipi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = tinydrm_suspend(&mipi->tinydrm);
+ if (ret)
+ return ret;
+
+ mi0283qt_fini(mipi);
+
+ return 0;
+}
+
+static int __maybe_unused mi0283qt_pm_resume(struct device *dev)
+{
+ struct mipi_dbi *mipi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = mi0283qt_init(mipi);
+ if (ret)
+ return ret;
+
+ return tinydrm_resume(&mipi->tinydrm);
+}
+
+static const struct dev_pm_ops mi0283qt_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mi0283qt_pm_suspend, mi0283qt_pm_resume)
+};
+
+static struct spi_driver mi0283qt_spi_driver = {
+ .driver = {
+ .name = "mi0283qt",
+ .owner = THIS_MODULE,
+ .of_match_table = mi0283qt_of_match,
+ .pm = &mi0283qt_pm_ops,
+ },
+ .id_table = mi0283qt_id,
+ .probe = mi0283qt_probe,
+ .shutdown = mi0283qt_shutdown,
+};
+module_spi_driver(mi0283qt_spi_driver);
+
+MODULE_DESCRIPTION("Multi-Inno MI0283QT DRM driver");
+MODULE_AUTHOR("Noralf Trønnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
new file mode 100644
index 000000000000..29c0939f5247
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -0,0 +1,1005 @@
+/*
+ * MIPI Display Bus Interface (DBI) LCD controller support
+ *
+ * Copyright 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
+
+#define DCS_POWER_MODE_DISPLAY BIT(2)
+#define DCS_POWER_MODE_DISPLAY_NORMAL_MODE BIT(3)
+#define DCS_POWER_MODE_SLEEP_MODE BIT(4)
+#define DCS_POWER_MODE_PARTIAL_MODE BIT(5)
+#define DCS_POWER_MODE_IDLE_MODE BIT(6)
+#define DCS_POWER_MODE_RESERVED_MASK (BIT(0) | BIT(1) | BIT(7))
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for MIPI Display Bus Interface (DBI)
+ * compatible display controllers.
+ *
+ * Many controllers for tiny LCD displays are MIPI compliant and can use this
+ * library. If a controller uses registers 0x2A and 0x2B to set the area to
+ * update and uses register 0x2C to write to frame memory, it is most likely
+ * MIPI compliant.
+ *
+ * Only MIPI Type 1 displays are supported since a full frame memory is needed.
+ *
+ * There are 3 MIPI DBI implementation types:
+ *
+ * A. Motorola 6800 type parallel bus
+ *
+ * B. Intel 8080 type parallel bus
+ *
+ * C. SPI type with 3 options:
+ *
+ * 1. 9-bit with the Data/Command signal as the ninth bit
+ * 2. Same as above except it's sent as 16 bits
+ * 3. 8-bit with the Data/Command signal as a separate D/CX pin
+ *
+ * Currently mipi_dbi only supports Type C options 1 and 3 with
+ * mipi_dbi_spi_init().
+ */
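+
+/*
+ * A minimal usage sketch (illustrative only, not part of the API docs): a
+ * panel driver fills in a &mipi_dbi, calls mipi_dbi_spi_init() from its SPI
+ * probe and then issues DCS commands during init, roughly:
+ *
+ *	mipi_dbi_hw_reset(mipi);
+ *	mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
+ *	msleep(20);
+ *	mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ *	mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ *
+ * See mi0283qt.c above for a complete driver built on these helpers.
+ */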
+
+#define MIPI_DBI_DEBUG_COMMAND(cmd, data, len) \
+({ \
+ if (!len) \
+ DRM_DEBUG_DRIVER("cmd=%02x\n", cmd); \
+ else if (len <= 32) \
+ DRM_DEBUG_DRIVER("cmd=%02x, par=%*ph\n", cmd, (int)len, data);\
+ else \
+ DRM_DEBUG_DRIVER("cmd=%02x, len=%zu\n", cmd, len); \
+})
+
+static const u8 mipi_dbi_dcs_read_commands[] = {
+ MIPI_DCS_GET_DISPLAY_ID,
+ MIPI_DCS_GET_RED_CHANNEL,
+ MIPI_DCS_GET_GREEN_CHANNEL,
+ MIPI_DCS_GET_BLUE_CHANNEL,
+ MIPI_DCS_GET_DISPLAY_STATUS,
+ MIPI_DCS_GET_POWER_MODE,
+ MIPI_DCS_GET_ADDRESS_MODE,
+ MIPI_DCS_GET_PIXEL_FORMAT,
+ MIPI_DCS_GET_DISPLAY_MODE,
+ MIPI_DCS_GET_SIGNAL_MODE,
+ MIPI_DCS_GET_DIAGNOSTIC_RESULT,
+ MIPI_DCS_READ_MEMORY_START,
+ MIPI_DCS_READ_MEMORY_CONTINUE,
+ MIPI_DCS_GET_SCANLINE,
+ MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ MIPI_DCS_GET_CONTROL_DISPLAY,
+ MIPI_DCS_GET_POWER_SAVE,
+ MIPI_DCS_GET_CABC_MIN_BRIGHTNESS,
+ MIPI_DCS_READ_DDB_START,
+ MIPI_DCS_READ_DDB_CONTINUE,
+ 0, /* sentinel */
+};
+
+static bool mipi_dbi_command_is_read(struct mipi_dbi *mipi, u8 cmd)
+{
+ unsigned int i;
+
+ if (!mipi->read_commands)
+ return false;
+
+ for (i = 0; i < 0xff; i++) {
+ if (!mipi->read_commands[i])
+ return false;
+ if (cmd == mipi->read_commands[i])
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * mipi_dbi_command_read - MIPI DCS read command
+ * @mipi: MIPI structure
+ * @cmd: Command
+ * @val: Value read
+ *
+ * Send MIPI DCS read command to the controller.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val)
+{
+ if (!mipi->read_commands)
+ return -EACCES;
+
+ if (!mipi_dbi_command_is_read(mipi, cmd))
+ return -EINVAL;
+
+ return mipi_dbi_command_buf(mipi, cmd, val, 1);
+}
+EXPORT_SYMBOL(mipi_dbi_command_read);
+
+/**
+ * mipi_dbi_command_buf - MIPI DCS command with parameter(s) in an array
+ * @mipi: MIPI structure
+ * @cmd: Command
+ * @data: Parameter buffer
+ * @len: Buffer length
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+{
+ int ret;
+
+ mutex_lock(&mipi->cmdlock);
+ ret = mipi->command(mipi, cmd, data, len);
+ mutex_unlock(&mipi->cmdlock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mipi_dbi_command_buf);
+
+static int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip, bool swap)
+{
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
+ struct drm_format_name_buf format_name;
+ void *src = cma_obj->vaddr;
+ int ret = 0;
+
+ if (import_attach) {
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+ }
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_RGB565:
+ if (swap)
+ tinydrm_swab16(dst, src, fb, clip);
+ else
+ tinydrm_memcpy(dst, src, fb, clip);
+ break;
+ case DRM_FORMAT_XRGB8888:
+ tinydrm_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
+ break;
+ default:
+ dev_err_once(fb->dev->dev, "Format is not supported: %s\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
+ return -EINVAL;
+ }
+
+ if (import_attach)
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ return ret;
+}
+
+static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct tinydrm_device *tdev = fb->dev->dev_private;
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ bool swap = mipi->swap_bytes;
+ struct drm_clip_rect clip;
+ int ret = 0;
+ bool full;
+ void *tr;
+
+ mutex_lock(&tdev->dirty_lock);
+
+ if (!mipi->enabled)
+ goto out_unlock;
+
+ /* fbdev can flush even when we're not interested */
+ if (tdev->pipe.plane.fb != fb)
+ goto out_unlock;
+
+ full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
+ fb->width, fb->height);
+
+ DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id,
+ clip.x1, clip.x2, clip.y1, clip.y2);
+
+ if (!mipi->dc || !full || swap ||
+ fb->format->format == DRM_FORMAT_XRGB8888) {
+ tr = mipi->tx_buf;
+ ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
+ if (ret)
+ goto out_unlock;
+ } else {
+ tr = cma_obj->vaddr;
+ }
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (clip.x1 >> 8) & 0xFF, clip.x1 & 0xFF,
+ (clip.x2 >> 8) & 0xFF, (clip.x2 - 1) & 0xFF);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
+ (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
+ (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
+
+ ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
+ (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
+
+out_unlock:
+ mutex_unlock(&tdev->dirty_lock);
+
+ if (ret)
+ dev_err_once(fb->dev->dev, "Failed to update display %d\n",
+ ret);
+
+ return ret;
+}
+
+static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
+ .destroy = drm_fb_cma_destroy,
+ .create_handle = drm_fb_cma_create_handle,
+ .dirty = mipi_dbi_fb_dirty,
+};
+
+/**
+ * mipi_dbi_pipe_enable - MIPI DBI pipe enable helper
+ * @pipe: Display pipe
+ * @crtc_state: CRTC state
+ *
+ * This function enables the backlight. Drivers can use this as their
+ * &drm_simple_display_pipe_funcs->enable callback.
+ */
+void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct drm_framebuffer *fb = pipe->plane.fb;
+
+ DRM_DEBUG_KMS("\n");
+
+ mipi->enabled = true;
+ if (fb)
+ fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+
+ tinydrm_enable_backlight(mipi->backlight);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_enable);
+
+static void mipi_dbi_blank(struct mipi_dbi *mipi)
+{
+ struct drm_device *drm = mipi->tinydrm.drm;
+ u16 height = drm->mode_config.min_height;
+ u16 width = drm->mode_config.min_width;
+ size_t len = width * height * 2;
+
+ memset(mipi->tx_buf, 0, len);
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
+ (width >> 8) & 0xFF, (width - 1) & 0xFF);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
+ (height >> 8) & 0xFF, (height - 1) & 0xFF);
+ mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
+ (u8 *)mipi->tx_buf, len);
+}
+
+/**
+ * mipi_dbi_pipe_disable - MIPI DBI pipe disable helper
+ * @pipe: Display pipe
+ *
+ * This function disables the backlight if present; otherwise the display
+ * memory is blanked. Drivers can use this as their
+ * &drm_simple_display_pipe_funcs->disable callback.
+ */
+void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+
+ DRM_DEBUG_KMS("\n");
+
+ mipi->enabled = false;
+
+ if (mipi->backlight)
+ tinydrm_disable_backlight(mipi->backlight);
+ else
+ mipi_dbi_blank(mipi);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_disable);
+
+static const uint32_t mipi_dbi_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+/**
+ * mipi_dbi_init - MIPI DBI initialization
+ * @dev: Parent device
+ * @mipi: &mipi_dbi structure to initialize
+ * @pipe_funcs: Display pipe functions
+ * @driver: DRM driver
+ * @mode: Display mode
+ * @rotation: Initial rotation in degrees counter-clockwise
+ *
+ * This function initializes a &mipi_dbi structure and its underlying
+ * &tinydrm_device. It also sets up the display pipeline.
+ *
+ * Supported formats: Native RGB565 and emulated XRGB8888.
+ *
+ * Objects created by this function will be automatically freed on driver
+ * detach (devres).
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *pipe_funcs,
+ struct drm_driver *driver,
+ const struct drm_display_mode *mode, unsigned int rotation)
+{
+ size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
+ struct tinydrm_device *tdev = &mipi->tinydrm;
+ int ret;
+
+ if (!mipi->command)
+ return -EINVAL;
+
+ mutex_init(&mipi->cmdlock);
+
+ mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ if (!mipi->tx_buf)
+ return -ENOMEM;
+
+ ret = devm_tinydrm_init(dev, tdev, &mipi_dbi_fb_funcs, driver);
+ if (ret)
+ return ret;
+
+ /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
+ ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ mipi_dbi_formats,
+ ARRAY_SIZE(mipi_dbi_formats), mode,
+ rotation);
+ if (ret)
+ return ret;
+
+ tdev->drm->mode_config.preferred_depth = 16;
+ mipi->rotation = rotation;
+
+ drm_mode_config_reset(tdev->drm);
+
+ DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
+ tdev->drm->mode_config.preferred_depth, rotation);
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dbi_init);
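+
+/*
+ * Note on buffer sizing (worked example, numbers from mi0283qt.c above):
+ * the shadow buffer holds one full frame of 16-bit pixels, so a 320x240
+ * mode allocates 320 * 240 * 2 = 153600 bytes for tx_buf.
+ */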
+
+/**
+ * mipi_dbi_hw_reset - Hardware reset of controller
+ * @mipi: MIPI DBI structure
+ *
+ * Reset controller if the &mipi_dbi->reset gpio is set.
+ */
+void mipi_dbi_hw_reset(struct mipi_dbi *mipi)
+{
+ if (!mipi->reset)
+ return;
+
+ gpiod_set_value_cansleep(mipi->reset, 0);
+ msleep(20);
+ gpiod_set_value_cansleep(mipi->reset, 1);
+ msleep(120);
+}
+EXPORT_SYMBOL(mipi_dbi_hw_reset);
+
+/**
+ * mipi_dbi_display_is_on - Check if display is on
+ * @mipi: MIPI DBI structure
+ *
+ * This function checks the Power Mode register (if readable) to see if
+ * display output is turned on. This can be used to see if the bootloader
+ * has already turned on the display avoiding flicker when the pipeline is
+ * enabled.
+ *
+ * Returns:
+ * true if the display can be verified to be on, false otherwise.
+ */
+bool mipi_dbi_display_is_on(struct mipi_dbi *mipi)
+{
+ u8 val;
+
+ if (mipi_dbi_command_read(mipi, MIPI_DCS_GET_POWER_MODE, &val))
+ return false;
+
+ val &= ~DCS_POWER_MODE_RESERVED_MASK;
+
+ if (val != (DCS_POWER_MODE_DISPLAY |
+ DCS_POWER_MODE_DISPLAY_NORMAL_MODE | DCS_POWER_MODE_SLEEP_MODE))
+ return false;
+
+ DRM_DEBUG_DRIVER("Display is ON\n");
+
+ return true;
+}
+EXPORT_SYMBOL(mipi_dbi_display_is_on);
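+
+/*
+ * Worked example (hypothetical readback): if MIPI_DCS_GET_POWER_MODE
+ * returns 0x9c, masking out the reserved bits (0, 1 and 7) leaves 0x1c,
+ * which matches DISPLAY | DISPLAY_NORMAL_MODE | SLEEP_MODE (bits 2-4),
+ * so the display is reported as on.
+ */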
+
+#if IS_ENABLED(CONFIG_SPI)
+
+/*
+ * Many controllers have a max speed of 10MHz, but can be pushed way beyond
+ * that. Increase reliability by running pixel data at max speed and the rest
+ * at 10MHz, preventing transfer glitches from messing up the init settings.
+ */
+static u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len)
+{
+ if (len > 64)
+ return 0; /* use default */
+
+ return min_t(u32, 10000000, spi->max_speed_hz);
+}
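+
+/*
+ * Example (assumed numbers): with spi->max_speed_hz = 32000000, a command
+ * transfer of up to 64 bytes is clamped to 10 MHz, while pixel data
+ * (len > 64) returns 0 here and runs at the full 32 MHz bus speed.
+ */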
+
+/*
+ * MIPI DBI Type C Option 1
+ *
+ * If the SPI controller doesn't have 9 bits per word support,
+ * use blocks of 9 bytes to send 8x 9-bit words using an 8-bit SPI transfer.
+ * Pad partial blocks with MIPI_DCS_NOP (zero).
+ * This is how the D/C bit (x) is added:
+ * x7654321
+ * 0x765432
+ * 10x76543
+ * 210x7654
+ * 3210x765
+ * 43210x76
+ * 543210x7
+ * 6543210x
+ * 76543210
+ */
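+
+/*
+ * Worked example: a full 8-byte data chunk becomes 9 bytes on the wire,
+ * with a '1' (D/C) bit preceding each data byte. A lone command byte such
+ * as 0x2c is padded to the 9-byte block 00 00 00 00 00 00 00 00 2c: seven
+ * 9-bit no-op words followed by one word carrying D/C=0 and the command.
+ */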
+
+static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
+ const void *buf, size_t len,
+ unsigned int bpw)
+{
+ bool swap_bytes = (bpw == 16 && tinydrm_machine_little_endian());
+ size_t chunk, max_chunk = mipi->tx_buf9_len;
+ struct spi_device *spi = mipi->spi;
+ struct spi_transfer tr = {
+ .tx_buf = mipi->tx_buf9,
+ .bits_per_word = 8,
+ };
+ struct spi_message m;
+ const u8 *src = buf;
+ int i, ret;
+ u8 *dst;
+
+ if (drm_debug & DRM_UT_DRIVER)
+ pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
+ __func__, dc, max_chunk);
+
+ tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len);
+ spi_message_init_with_transfers(&m, &tr, 1);
+
+ if (!dc) {
+ if (WARN_ON_ONCE(len != 1))
+ return -EINVAL;
+
+ /* Command: pad no-ops (zeroes) at the beginning of the block */
+ dst = mipi->tx_buf9;
+ memset(dst, 0, 9);
+ dst[8] = *src;
+ tr.len = 9;
+
+ tinydrm_dbg_spi_message(spi, &m);
+
+ return spi_sync(spi, &m);
+ }
+
+ /* max with room for adding one bit per byte */
+ max_chunk = max_chunk / 9 * 8;
+ /* but no bigger than len */
+ max_chunk = min(max_chunk, len);
+ /* 8 byte blocks */
+ max_chunk = max_t(size_t, 8, max_chunk & ~0x7);
+
+ while (len) {
+ size_t added = 0;
+
+ chunk = min(len, max_chunk);
+ len -= chunk;
+ dst = mipi->tx_buf9;
+
+ if (chunk < 8) {
+ u8 val, carry = 0;
+
+ /* Data: pad no-ops (zeroes) at the end of the block */
+ memset(dst, 0, 9);
+
+ if (swap_bytes) {
+ for (i = 1; i < (chunk + 1); i++) {
+ val = src[1];
+ *dst++ = carry | BIT(8 - i) | (val >> i);
+ carry = val << (8 - i);
+ i++;
+ val = src[0];
+ *dst++ = carry | BIT(8 - i) | (val >> i);
+ carry = val << (8 - i);
+ src += 2;
+ }
+ *dst++ = carry;
+ } else {
+ for (i = 1; i < (chunk + 1); i++) {
+ val = *src++;
+ *dst++ = carry | BIT(8 - i) | (val >> i);
+ carry = val << (8 - i);
+ }
+ *dst++ = carry;
+ }
+
+ chunk = 8;
+ added = 1;
+ } else {
+ for (i = 0; i < chunk; i += 8) {
+ if (swap_bytes) {
+ *dst++ = BIT(7) | (src[1] >> 1);
+ *dst++ = (src[1] << 7) | BIT(6) | (src[0] >> 2);
+ *dst++ = (src[0] << 6) | BIT(5) | (src[3] >> 3);
+ *dst++ = (src[3] << 5) | BIT(4) | (src[2] >> 4);
+ *dst++ = (src[2] << 4) | BIT(3) | (src[5] >> 5);
+ *dst++ = (src[5] << 3) | BIT(2) | (src[4] >> 6);
+ *dst++ = (src[4] << 2) | BIT(1) | (src[7] >> 7);
+ *dst++ = (src[7] << 1) | BIT(0);
+ *dst++ = src[6];
+ } else {
+ *dst++ = BIT(7) | (src[0] >> 1);
+ *dst++ = (src[0] << 7) | BIT(6) | (src[1] >> 2);
+ *dst++ = (src[1] << 6) | BIT(5) | (src[2] >> 3);
+ *dst++ = (src[2] << 5) | BIT(4) | (src[3] >> 4);
+ *dst++ = (src[3] << 4) | BIT(3) | (src[4] >> 5);
+ *dst++ = (src[4] << 3) | BIT(2) | (src[5] >> 6);
+ *dst++ = (src[5] << 2) | BIT(1) | (src[6] >> 7);
+ *dst++ = (src[6] << 1) | BIT(0);
+ *dst++ = src[7];
+ }
+
+ src += 8;
+ added++;
+ }
+ }
+
+ tr.len = chunk + added;
+
+ tinydrm_dbg_spi_message(spi, &m);
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
+ const void *buf, size_t len,
+ unsigned int bpw)
+{
+ struct spi_device *spi = mipi->spi;
+ struct spi_transfer tr = {
+ .bits_per_word = 9,
+ };
+ const u16 *src16 = buf;
+ const u8 *src8 = buf;
+ struct spi_message m;
+ size_t max_chunk;
+ u16 *dst16;
+ int ret;
+
+ if (!tinydrm_spi_bpw_supported(spi, 9))
+ return mipi_dbi_spi1e_transfer(mipi, dc, buf, len, bpw);
+
+ tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len);
+ max_chunk = mipi->tx_buf9_len;
+ dst16 = mipi->tx_buf9;
+
+ if (drm_debug & DRM_UT_DRIVER)
+ pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
+ __func__, dc, max_chunk);
+
+ max_chunk = min(max_chunk / 2, len);
+
+ spi_message_init_with_transfers(&m, &tr, 1);
+ tr.tx_buf = dst16;
+
+ while (len) {
+ size_t chunk = min(len, max_chunk);
+ unsigned int i;
+
+ if (bpw == 16 && tinydrm_machine_little_endian()) {
+ for (i = 0; i < (chunk * 2); i += 2) {
+ dst16[i] = *src16 >> 8;
+ dst16[i + 1] = *src16++ & 0xFF;
+ if (dc) {
+ dst16[i] |= 0x0100;
+ dst16[i + 1] |= 0x0100;
+ }
+ }
+ } else {
+ for (i = 0; i < chunk; i++) {
+ dst16[i] = *src8++;
+ if (dc)
+ dst16[i] |= 0x0100;
+ }
+ }
+
+ tr.len = chunk;
+ len -= chunk;
+
+ tinydrm_dbg_spi_message(spi, &m);
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
+ u8 *parameters, size_t num)
+{
+ unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
+ int ret;
+
+ if (mipi_dbi_command_is_read(mipi, cmd))
+ return -ENOTSUPP;
+
+ MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
+
+ ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
+ if (ret || !num)
+ return ret;
+
+ return mipi_dbi_spi1_transfer(mipi, 1, parameters, num, bpw);
+}
+
+/* MIPI DBI Type C Option 3 */
+
+static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+ u8 *data, size_t len)
+{
+ struct spi_device *spi = mipi->spi;
+ u32 speed_hz = min_t(u32, MIPI_DBI_MAX_SPI_READ_SPEED,
+ spi->max_speed_hz / 2);
+ struct spi_transfer tr[2] = {
+ {
+ .speed_hz = speed_hz,
+ .tx_buf = &cmd,
+ .len = 1,
+ }, {
+ .speed_hz = speed_hz,
+ .len = len,
+ },
+ };
+ struct spi_message m;
+ u8 *buf;
+ int ret;
+
+ if (!len)
+ return -EINVAL;
+
+ /*
+ * Support non-standard 24-bit and 32-bit Nokia read commands which
+ * start with a dummy clock, so we need to read an extra byte.
+ */
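+ /*
+ * E.g. for a 3-byte MIPI_DCS_GET_DISPLAY_ID we read 4 bytes, then
+ * shift each result byte left by one and pull the top bit of the
+ * following byte in as the new LSB, so the leading dummy bit is
+ * shifted out.
+ */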
+ if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
+ cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
+ if (!(len == 3 || len == 4))
+ return -EINVAL;
+
+ tr[1].len = len + 1;
+ }
+
+ buf = kmalloc(tr[1].len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ tr[1].rx_buf = buf;
+ gpiod_set_value_cansleep(mipi->dc, 0);
+
+ spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr));
+ ret = spi_sync(spi, &m);
+ if (ret)
+ goto err_free;
+
+ tinydrm_dbg_spi_message(spi, &m);
+
+ if (tr[1].len == len) {
+ memcpy(data, buf, len);
+ } else {
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
+ }
+
+ MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
+
+err_free:
+ kfree(buf);
+
+ return ret;
+}
+
+static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
+ u8 *par, size_t num)
+{
+ struct spi_device *spi = mipi->spi;
+ unsigned int bpw = 8;
+ u32 speed_hz;
+ int ret;
+
+ if (mipi_dbi_command_is_read(mipi, cmd))
+ return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
+
+ MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
+
+ gpiod_set_value_cansleep(mipi->dc, 0);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
+ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+ if (ret || !num)
+ return ret;
+
+ if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ bpw = 16;
+
+ gpiod_set_value_cansleep(mipi->dc, 1);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+
+ return tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num);
+}
+
+/**
+ * mipi_dbi_spi_init - Initialize MIPI DBI SPI interfaced controller
+ * @spi: SPI device
+ * @dc: D/C gpio (optional)
+ * @mipi: &mipi_dbi structure to initialize
+ * @pipe_funcs: Display pipe functions
+ * @driver: DRM driver
+ * @mode: Display mode
+ * @rotation: Initial rotation in degrees counter-clockwise
+ *
+ * This function sets &mipi_dbi->command, enables &mipi_dbi->read_commands for the
+ * usual read commands and initializes @mipi using mipi_dbi_init().
+ *
+ * If @dc is set, a Type C Option 3 interface is assumed, if not
+ * Type C Option 1.
+ *
+ * If the SPI master driver doesn't support the necessary bits per word,
+ * the following transformation is used:
+ *
+ * - 9-bit: reorder each block of 8 words into 9 bytes, padded with no-op commands.
+ * - 16-bit: if big endian send as 8-bit, if little endian swap bytes
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
+ struct gpio_desc *dc,
+ const struct drm_simple_display_pipe_funcs *pipe_funcs,
+ struct drm_driver *driver,
+ const struct drm_display_mode *mode,
+ unsigned int rotation)
+{
+ size_t tx_size = tinydrm_spi_max_transfer_size(spi, 0);
+ struct device *dev = &spi->dev;
+ int ret;
+
+ if (tx_size < 16) {
+ DRM_ERROR("SPI transmit buffer too small: %zu\n", tx_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Even though it's not the SPI device that does DMA (the master does),
+ * the dma mask is necessary for the dma_alloc_wc() in
+ * drm_gem_cma_create(). The dma_addr returned will be a physical
+ * address which might be different from the bus address, but this is
+ * not a problem since the address will not be used.
+ * The virtual address is used in the transfer and the SPI core
+ * re-maps it on the SPI master device using the DMA streaming API
+ * (spi_map_buf()).
+ */
+ if (!dev->coherent_dma_mask) {
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "Failed to set dma mask %d\n", ret);
+ return ret;
+ }
+ }
+
+ mipi->spi = spi;
+ mipi->read_commands = mipi_dbi_dcs_read_commands;
+
+ if (dc) {
+ mipi->command = mipi_dbi_typec3_command;
+ mipi->dc = dc;
+ if (tinydrm_machine_little_endian() &&
+ !tinydrm_spi_bpw_supported(spi, 16))
+ mipi->swap_bytes = true;
+ } else {
+ mipi->command = mipi_dbi_typec1_command;
+ mipi->tx_buf9_len = tx_size;
+ mipi->tx_buf9 = devm_kmalloc(dev, tx_size, GFP_KERNEL);
+ if (!mipi->tx_buf9)
+ return -ENOMEM;
+ }
+
+ return mipi_dbi_init(dev, mipi, pipe_funcs, driver, mode, rotation);
+}
+EXPORT_SYMBOL(mipi_dbi_spi_init);
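+
+/*
+ * Byte-swap example (illustrative): an RGB565 pixel 0x1234 is stored
+ * little-endian in memory as 34 12. The controller expects the high byte
+ * first, so when the transfer is degraded to 8 bits per word the bytes
+ * are swapped to 12 34 before being sent.
+ */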
+
+#endif /* CONFIG_SPI */
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mipi_dbi *mipi = m->private;
+ u8 val, cmd = 0, parameters[64];
+ char *buf, *pos, *token;
+ unsigned int i;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* strip trailing whitespace */
+ for (i = count - 1; i > 0; i--)
+ if (isspace(buf[i]))
+ buf[i] = '\0';
+ else
+ break;
+ i = 0;
+ pos = buf;
+ while (pos) {
+ token = strsep(&pos, " ");
+ if (!token) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ ret = kstrtou8(token, 16, &val);
+ if (ret < 0)
+ goto err_free;
+
+ if (token == buf)
+ cmd = val;
+ else
+ parameters[i++] = val;
+
+ if (i == 64) {
+ ret = -E2BIG;
+ goto err_free;
+ }
+ }
+
+ ret = mipi_dbi_command_buf(mipi, cmd, parameters, i);
+
+err_free:
+ kfree(buf);
+
+ return ret < 0 ? ret : count;
+}
+
+static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
+{
+ struct mipi_dbi *mipi = m->private;
+ u8 cmd, val[4];
+ size_t len, i;
+ int ret;
+
+ for (cmd = 0; cmd < 255; cmd++) {
+ if (!mipi_dbi_command_is_read(mipi, cmd))
+ continue;
+
+ switch (cmd) {
+ case MIPI_DCS_READ_MEMORY_START:
+ case MIPI_DCS_READ_MEMORY_CONTINUE:
+ len = 2;
+ break;
+ case MIPI_DCS_GET_DISPLAY_ID:
+ len = 3;
+ break;
+ case MIPI_DCS_GET_DISPLAY_STATUS:
+ len = 4;
+ break;
+ default:
+ len = 1;
+ break;
+ }
+
+ seq_printf(m, "%02x: ", cmd);
+ ret = mipi_dbi_command_buf(mipi, cmd, val, len);
+ if (ret) {
+ seq_puts(m, "XX\n");
+ continue;
+ }
+
+ for (i = 0; i < len; i++)
+ seq_printf(m, "%02x", val[i]);
+ seq_puts(m, "\n");
+ }
+
+ return 0;
+}
+
+static int mipi_dbi_debugfs_command_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, mipi_dbi_debugfs_command_show,
+ inode->i_private);
+}
+
+static const struct file_operations mipi_dbi_debugfs_command_fops = {
+ .owner = THIS_MODULE,
+ .open = mipi_dbi_debugfs_command_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = mipi_dbi_debugfs_command_write,
+};
+
+static const struct drm_info_list mipi_dbi_debugfs_list[] = {
+ { "fb", drm_fb_cma_debugfs_show, 0 },
+};
+
+/**
+ * mipi_dbi_debugfs_init - Create debugfs entries
+ * @minor: DRM minor
+ *
+ * This function creates a 'command' debugfs file for sending commands to the
+ * controller or reading back the values of the supported read commands.
+ * Drivers can use this as their &drm_driver->debugfs_init callback.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_debugfs_init(struct drm_minor *minor)
+{
+ struct tinydrm_device *tdev = minor->dev->dev_private;
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ umode_t mode = S_IFREG | S_IWUSR;
+
+ if (mipi->read_commands)
+ mode |= S_IRUGO;
+ debugfs_create_file("command", mode, minor->debugfs_root, mipi,
+ &mipi_dbi_debugfs_command_fops);
+
+ return drm_debugfs_create_files(mipi_dbi_debugfs_list,
+ ARRAY_SIZE(mipi_dbi_debugfs_list),
+ minor->debugfs_root, minor);
+}
+EXPORT_SYMBOL(mipi_dbi_debugfs_init);
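+
+/*
+ * Usage sketch (paths are an assumption, with debugfs mounted at
+ * /sys/kernel/debug and the device on DRM minor 0):
+ *
+ *   # echo "2a 00 00 00 ef" > /sys/kernel/debug/dri/0/command
+ *   # cat /sys/kernel/debug/dri/0/command
+ *
+ * The first sends command 0x2a (set column address) with four parameters;
+ * the second dumps the values of all supported read commands.
+ */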
+
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ffc6cb55c78c..17478f38dea3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -163,6 +163,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
lockdep_assert_held(&bo->resv->lock.base);
@@ -170,11 +171,13 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
BUG_ON(!list_empty(&bo->lru));
- list_add(&bo->lru, bdev->driver->lru_tail(bo));
+ man = &bdev->man[bo->mem.mem_type];
+ list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
+ list_add_tail(&bo->swap,
+ &bo->glob->swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
@@ -188,11 +191,6 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
-
- if (bdev->driver->lru_removal)
- bdev->driver->lru_removal(bo);
-
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -201,6 +199,11 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
list_del_init(&bo->lru);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
}
+
+ /*
+ * TODO: Add a driver hook to delete from
+ * driver-specific LRUs here.
+ */
}
void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
@@ -213,30 +216,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
-
lockdep_assert_held(&bo->resv->lock.base);
- if (bdev->driver->lru_removal)
- bdev->driver->lru_removal(bo);
-
ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
-struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
-{
- return bo->bdev->man[bo->mem.mem_type].lru.prev;
-}
-EXPORT_SYMBOL(ttm_bo_default_lru_tail);
-
-struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
-{
- return bo->glob->swap_lru.prev;
-}
-EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
-
/*
* Call bo->mutex locked.
*/
@@ -327,7 +313,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, mem);
+ bdev->driver->move_notify(bo, evict, mem);
bo->mem = *mem;
mem->mm_node = NULL;
goto moved;
@@ -335,7 +321,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, mem);
+ bdev->driver->move_notify(bo, evict, mem);
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
@@ -351,7 +337,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg tmp_mem = *mem;
*mem = bo->mem;
bo->mem = tmp_mem;
- bdev->driver->move_notify(bo, mem);
+ bdev->driver->move_notify(bo, false, mem);
bo->mem = *mem;
*mem = tmp_mem;
}
@@ -399,7 +385,7 @@ out_err:
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
if (bo->bdev->driver->move_notify)
- bo->bdev->driver->move_notify(bo, NULL);
+ bo->bdev->driver->move_notify(bo, false, NULL);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
@@ -719,20 +705,27 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
int ret = -EBUSY;
+ unsigned i;
spin_lock(&glob->lru_lock);
- list_for_each_entry(bo, &man->lru, lru) {
- ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (ret)
- continue;
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+ list_for_each_entry(bo, &man->lru[i], lru) {
+ ret = __ttm_bo_reserve(bo, false, true, NULL);
+ if (ret)
+ continue;
- if (place && !bdev->driver->eviction_valuable(bo, place)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
- continue;
+ if (place && !bdev->driver->eviction_valuable(bo,
+ place)) {
+ __ttm_bo_unreserve(bo);
+ ret = -EBUSY;
+ continue;
+ }
+
+ break;
}
- break;
+ if (!ret)
+ break;
}
if (ret) {
@@ -1173,6 +1166,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
+ bo->priority = 0;
/*
* For ttm_bo_type_device buffers, allocate
@@ -1267,29 +1261,27 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
EXPORT_SYMBOL(ttm_bo_create);
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- unsigned mem_type, bool allow_errors)
+ unsigned mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
struct dma_fence *fence;
int ret;
+ unsigned i;
/*
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
- while (!list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
- if (ret) {
- if (allow_errors) {
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+ while (!list_empty(&man->lru[i])) {
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+ if (ret)
return ret;
- } else {
- pr_err("Cleanup eviction failed\n");
- }
+ spin_lock(&glob->lru_lock);
}
- spin_lock(&glob->lru_lock);
}
spin_unlock(&glob->lru_lock);
@@ -1300,13 +1292,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
if (fence) {
ret = dma_fence_wait(fence, false);
dma_fence_put(fence);
- if (ret) {
- if (allow_errors) {
- return ret;
- } else {
- pr_err("Cleanup eviction failed\n");
- }
- }
+ if (ret)
+ return ret;
}
return 0;
@@ -1335,7 +1322,11 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0;
if (mem_type > 0) {
- ttm_bo_force_list_clean(bdev, mem_type, false);
+ ret = ttm_bo_force_list_clean(bdev, mem_type);
+ if (ret) {
+ pr_err("Cleanup eviction failed\n");
+ return ret;
+ }
ret = (*man->func->takedown)(man);
}
@@ -1358,7 +1349,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0;
}
- return ttm_bo_force_list_clean(bdev, mem_type, true);
+ return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
@@ -1367,6 +1358,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
{
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
+ unsigned i;
BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
@@ -1392,7 +1384,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
man->use_type = true;
man->size = p_size;
- INIT_LIST_HEAD(&man->lru);
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ INIT_LIST_HEAD(&man->lru[i]);
man->move = NULL;
return 0;
@@ -1424,6 +1417,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
container_of(ref, struct ttm_bo_global_ref, ref);
struct ttm_bo_global *glob = ref->object;
int ret;
+ unsigned i;
mutex_init(&glob->device_list_mutex);
spin_lock_init(&glob->lru_lock);
@@ -1435,7 +1429,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
goto out_no_drp;
}
- INIT_LIST_HEAD(&glob->swap_lru);
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ INIT_LIST_HEAD(&glob->swap_lru[i]);
INIT_LIST_HEAD(&glob->device_list);
ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
@@ -1494,8 +1489,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
if (list_empty(&bdev->ddestroy))
TTM_DEBUG("Delayed destroy list was clean\n");
- if (list_empty(&bdev->man[0].lru))
- TTM_DEBUG("Swap list was clean\n");
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ if (list_empty(&bdev->man[0].lru[i]))
+ TTM_DEBUG("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1645,11 +1641,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
container_of(shrink, struct ttm_bo_global, shrink);
struct ttm_buffer_object *bo;
int ret = -EBUSY;
- uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+ unsigned i;
spin_lock(&glob->lru_lock);
- list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = __ttm_bo_reserve(bo, false, true, NULL);
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+ list_for_each_entry(bo, &glob->swap_lru[i], swap) {
+ ret = __ttm_bo_reserve(bo, false, true, NULL);
+ if (!ret)
+ break;
+ }
if (!ret)
break;
}
@@ -1674,7 +1674,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
* Move to system cached
*/
- if ((bo->mem.placement & swap_placement) != swap_placement) {
+ if (bo->mem.mem_type != TTM_PL_SYSTEM ||
+ bo->ttm->caching_state != tt_cached) {
struct ttm_mem_reg evict_mem;
evict_mem = bo->mem;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index aa0bd054d3e9..90a6c0b03afc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -54,9 +54,8 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
- struct drm_mm_node *node = NULL;
- enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
- enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ struct drm_mm_node *node;
+ enum drm_mm_insert_mode mode;
unsigned long lpfn;
int ret;
@@ -68,16 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (!node)
return -ENOMEM;
- if (place->flags & TTM_PL_FLAG_TOPDOWN) {
- sflags = DRM_MM_SEARCH_BELOW;
- aflags = DRM_MM_CREATE_TOP;
- }
+ mode = DRM_MM_INSERT_BEST;
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mode = DRM_MM_INSERT_HIGH;
spin_lock(&rman->lock);
- ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
+ ret = drm_mm_insert_node_in_range(mm, node,
+ mem->num_pages,
mem->page_alignment, 0,
- place->fpfn, lpfn,
- sflags, aflags);
+ place->fpfn, lpfn, mode);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
@@ -141,17 +139,18 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&rman->lock);
- drm_mm_debug_table(&rman->mm, prefix);
+ drm_mm_print(&rman->mm, &p);
spin_unlock(&rman->lock);
}
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
- ttm_bo_man_init,
- ttm_bo_man_takedown,
- ttm_bo_man_get_node,
- ttm_bo_man_put_node,
- ttm_bo_man_debug
+ .init = ttm_bo_man_init,
+ .takedown = ttm_bo_man_takedown,
+ .get_node = ttm_bo_man_get_node,
+ .put_node = ttm_bo_man_put_node,
+ .debug = ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 68ef993ab431..88169141bef5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -66,8 +66,11 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
+ ttm_bo_reference(bo);
up_read(&vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
goto out_unlock;
}
@@ -120,8 +123,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ ttm_bo_reference(bo);
up_read(&vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
+ ttm_bo_unref(&bo);
}
return VM_FAULT_RETRY;
@@ -166,6 +171,13 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
if (unlikely(ret != 0)) {
retval = ret;
+
+ if (retval == VM_FAULT_RETRY &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* The BO has already been unreserved. */
+ return retval;
+ }
+
goto out_unlock;
}
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index f338a576efc8..6c4286e57362 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -100,7 +100,7 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
void udl_urb_completion(struct urb *urb);
int udl_driver_load(struct drm_device *dev, unsigned long flags);
-int udl_driver_unload(struct drm_device *dev);
+void udl_driver_unload(struct drm_device *dev);
int udl_fbdev_init(struct drm_device *dev);
void udl_fbdev_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 167f42c67c7c..8e8d60e9a1a2 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -89,7 +89,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int bytes_identical = 0;
struct urb *urb;
int aligned_x;
- int bpp = (fb->base.bits_per_pixel / 8);
+ int bpp = fb->base.format->cpp[0];
if (!fb->active_16)
return 0;
@@ -330,7 +330,7 @@ udl_framebuffer_init(struct drm_device *dev,
int ret;
ufb->obj = obj;
- drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
return ret;
}
@@ -395,7 +395,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &udlfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
@@ -441,8 +441,7 @@ int udl_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, &ufbdev->helper,
- 1, 1);
+ ret = drm_fb_helper_init(dev, &ufbdev->helper, 1);
if (ret)
goto free;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 873f010d9616..a9d93b871a15 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -367,7 +367,7 @@ int udl_drop_usb(struct drm_device *dev)
return 0;
}
-int udl_driver_unload(struct drm_device *dev)
+void udl_driver_unload(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
@@ -379,5 +379,4 @@ int udl_driver_unload(struct drm_device *dev)
udl_fbdev_cleanup(dev);
udl_modeset_cleanup(dev);
kfree(udl);
- return 0;
}
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index e53df59cb139..e1517d07cb7d 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -2,10 +2,12 @@ config DRM_VC4
tristate "Broadcom VC4 Graphics"
depends on ARCH_BCM2835 || COMPILE_TEST
depends on DRM
+ depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select DRM_PANEL
+ select DRM_MIPI_DSI
help
Choose this option if you have a system that has a Broadcom
VC4 GPU, such as the Raspberry Pi or other BCM2708/BCM2835.
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 7757f69a8a77..61f45d122bd0 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -8,6 +8,7 @@ vc4-y := \
vc4_crtc.o \
vc4_drv.o \
vc4_dpi.o \
+ vc4_dsi.o \
vc4_kms.o \
vc4_gem.o \
vc4_hdmi.o \
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7aadce1f7e7a..0c06844af445 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -156,7 +156,8 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
const struct drm_display_mode *mode)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 val;
int fifo_lines;
int vblank_lines;
@@ -272,9 +273,7 @@ int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
int *max_error, struct timeval *vblank_time,
unsigned flags)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
- struct drm_crtc *crtc = &vc4_crtc->base;
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
struct drm_crtc_state *state = crtc->state;
/* Helper routine in DRM core does all the work: */
@@ -349,38 +348,40 @@ static u32 vc4_get_fifo_full_level(u32 format)
}
/*
- * Returns the clock select bit for the connector attached to the
- * CRTC.
+ * Returns the encoder attached to the CRTC.
+ *
+ * VC4 can only scan out to one encoder at a time, while the DRM core
+ * allows drivers to push pixels to more than one encoder from the
+ * same CRTC.
*/
-static int vc4_get_clock_select(struct drm_crtc *crtc)
+static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
{
struct drm_connector *connector;
drm_for_each_connector(connector, crtc->dev) {
if (connector->state->crtc == crtc) {
- struct drm_encoder *encoder = connector->encoder;
- struct vc4_encoder *vc4_encoder =
- to_vc4_encoder(encoder);
-
- return vc4_encoder->clock_select;
+ return connector->encoder;
}
}
- return -1;
+ return NULL;
}
static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_crtc_state *state = crtc->state;
struct drm_display_mode *mode = &state->adjusted_mode;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
- u32 format = PV_CONTROL_FORMAT_24;
+ bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
+ vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
+ u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
bool debug_dump_regs = false;
- int clock_select = vc4_get_clock_select(crtc);
if (debug_dump_regs) {
DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
@@ -436,17 +437,19 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
*/
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
+ (is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
VC4_SET_FIELD(mode->htotal * pixel_rep / 2,
PV_VCONTROL_ODD_DELAY));
CRTC_WRITE(PV_VSYNCD_EVEN, 0);
} else {
- CRTC_WRITE(PV_V_CONTROL, PV_VCONTROL_CONTINUOUS);
+ CRTC_WRITE(PV_V_CONTROL,
+ PV_VCONTROL_CONTINUOUS |
+ (is_dsi ? PV_VCONTROL_DSI : 0));
}
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
-
CRTC_WRITE(PV_CONTROL,
VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
VC4_SET_FIELD(vc4_get_fifo_full_level(format),
@@ -455,7 +458,8 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
PV_CONTROL_CLR_AT_START |
PV_CONTROL_TRIGGER_UNDERFLOW |
PV_CONTROL_WAIT_HSTART |
- VC4_SET_FIELD(clock_select, PV_CONTROL_CLK_SELECT) |
+ VC4_SET_FIELD(vc4_encoder->clock_select,
+ PV_CONTROL_CLK_SELECT) |
PV_CONTROL_FIFO_CLR |
PV_CONTROL_EN);
@@ -589,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
- dlist_count, 1, 0);
+ dlist_count);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
if (ret)
return ret;
@@ -652,8 +656,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);
@@ -662,8 +666,8 @@ int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id)
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
CRTC_WRITE(PV_INTEN, 0);
}
@@ -937,7 +941,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_crtc *vc4_crtc;
struct drm_crtc *crtc;
struct drm_plane *primary_plane, *cursor_plane, *destroy_plane, *temp;
@@ -975,7 +978,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
&vc4_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
primary_plane->crtc = crtc;
- vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
vc4_crtc->channel = vc4_crtc->data->hvs_channel;
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index caf817bac885..5db06bdb5f27 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -18,6 +18,7 @@
static const struct drm_info_list vc4_debugfs_list[] = {
{"bo_stats", vc4_bo_stats_debugfs, 0},
{"dpi_regs", vc4_dpi_debugfs_regs, 0},
+ {"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
{"vec_regs", vc4_vec_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
@@ -36,9 +37,3 @@ vc4_debugfs_init(struct drm_minor *minor)
return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
}
-
-void
-vc4_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES, minor);
-}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index ac09ca7ff430..a459745e96f7 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -145,7 +145,6 @@ static struct drm_driver vc4_drm_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
- .debugfs_cleanup = vc4_debugfs_cleanup,
#endif
.gem_create_object = vc4_create_object,
@@ -296,6 +295,7 @@ static struct platform_driver *const component_drivers[] = {
&vc4_hdmi_driver,
&vc4_vec_driver,
&vc4_dpi_driver,
+ &vc4_dsi_driver,
&vc4_hvs_driver,
&vc4_crtc_driver,
&vc4_v3d_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index b5c4bb14d0d1..0e59f3ee1b83 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -9,14 +9,16 @@
#include "drmP.h"
#include "drm_gem_cma_helper.h"
+#include <drm/drm_encoder.h>
+
struct vc4_dev {
struct drm_device *dev;
struct vc4_hdmi *hdmi;
struct vc4_hvs *hvs;
- struct vc4_crtc *crtc[3];
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
+ struct vc4_dsi *dsi1;
struct vc4_vec *vec;
struct drm_fbdev_cma *fbdev;
@@ -456,7 +458,6 @@ int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
-void vc4_debugfs_cleanup(struct drm_minor *minor);
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
@@ -465,6 +466,10 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);
+/* vc4_dsi.c */
+extern struct platform_driver vc4_dsi_driver;
+int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);
+
/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
new file mode 100644
index 000000000000..2736b0331beb
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -0,0 +1,1725 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * DOC: VC4 DSI0/DSI1 module
+ *
+ * BCM2835 contains two DSI modules, DSI0 and DSI1. DSI0 is a
+ * single-lane DSI controller, while DSI1 is a more modern 4-lane DSI
+ * controller.
+ *
+ * Most Raspberry Pi boards expose DSI1 as their "DISPLAY" connector,
+ * while the compute module brings both DSI0 and DSI1 out.
+ *
+ * So far this driver has only been tested with a DSI1 video-mode
+ * display; most of the information necessary to support DSI0 is
+ * hopefully present as well.
+ */
+
+#include "drm_atomic_helper.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+#include "drm_mipi_dsi.h"
+#include "drm_panel.h"
+#include "linux/clk.h"
+#include "linux/clk-provider.h"
+#include "linux/completion.h"
+#include "linux/component.h"
+#include "linux/dmaengine.h"
+#include "linux/i2c.h"
+#include "linux/of_address.h"
+#include "linux/of_platform.h"
+#include "linux/pm_runtime.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define DSI_CMD_FIFO_DEPTH 16
+#define DSI_PIX_FIFO_DEPTH 256
+#define DSI_PIX_FIFO_WIDTH 4
+
+#define DSI0_CTRL 0x00
+
+/* Command packet control. */
+#define DSI0_TXPKT1C 0x04 /* AKA PKTC */
+#define DSI1_TXPKT1C 0x04
+# define DSI_TXPKT1C_TRIG_CMD_MASK VC4_MASK(31, 24)
+# define DSI_TXPKT1C_TRIG_CMD_SHIFT 24
+# define DSI_TXPKT1C_CMD_REPEAT_MASK VC4_MASK(23, 10)
+# define DSI_TXPKT1C_CMD_REPEAT_SHIFT 10
+
+# define DSI_TXPKT1C_DISPLAY_NO_MASK VC4_MASK(9, 8)
+# define DSI_TXPKT1C_DISPLAY_NO_SHIFT 8
+/* Short, trigger, BTA, or a long packet that fits all in CMDFIFO. */
+# define DSI_TXPKT1C_DISPLAY_NO_SHORT 0
+/* Primary display where cmdfifo provides part of the payload and
+ * pixelvalve the rest.
+ */
+# define DSI_TXPKT1C_DISPLAY_NO_PRIMARY 1
+/* Secondary display where cmdfifo provides part of the payload and
+ * pixfifo the rest.
+ */
+# define DSI_TXPKT1C_DISPLAY_NO_SECONDARY 2
+
+# define DSI_TXPKT1C_CMD_TX_TIME_MASK VC4_MASK(7, 6)
+# define DSI_TXPKT1C_CMD_TX_TIME_SHIFT 6
+
+# define DSI_TXPKT1C_CMD_CTRL_MASK VC4_MASK(5, 4)
+# define DSI_TXPKT1C_CMD_CTRL_SHIFT 4
+/* Command only. Uses TXPKT1H and DISPLAY_NO */
+# define DSI_TXPKT1C_CMD_CTRL_TX 0
+/* Command with BTA for either ack or read data. */
+# define DSI_TXPKT1C_CMD_CTRL_RX 1
+/* Trigger according to TRIG_CMD */
+# define DSI_TXPKT1C_CMD_CTRL_TRIG 2
+/* BTA alone for getting error status after a command, or a TE trigger
+ * without a previous command.
+ */
+# define DSI_TXPKT1C_CMD_CTRL_BTA 3
+
+# define DSI_TXPKT1C_CMD_MODE_LP BIT(3)
+# define DSI_TXPKT1C_CMD_TYPE_LONG BIT(2)
+# define DSI_TXPKT1C_CMD_TE_EN BIT(1)
+# define DSI_TXPKT1C_CMD_EN BIT(0)
+
+/* Command packet header. */
+#define DSI0_TXPKT1H 0x08 /* AKA PKTH */
+#define DSI1_TXPKT1H 0x08
+# define DSI_TXPKT1H_BC_CMDFIFO_MASK VC4_MASK(31, 24)
+# define DSI_TXPKT1H_BC_CMDFIFO_SHIFT 24
+# define DSI_TXPKT1H_BC_PARAM_MASK VC4_MASK(23, 8)
+# define DSI_TXPKT1H_BC_PARAM_SHIFT 8
+# define DSI_TXPKT1H_BC_DT_MASK VC4_MASK(7, 0)
+# define DSI_TXPKT1H_BC_DT_SHIFT 0
+
+#define DSI0_RXPKT1H 0x0c /* AKA RX1_PKTH */
+#define DSI1_RXPKT1H 0x14
+# define DSI_RXPKT1H_CRC_ERR BIT(31)
+# define DSI_RXPKT1H_DET_ERR BIT(30)
+# define DSI_RXPKT1H_ECC_ERR BIT(29)
+# define DSI_RXPKT1H_COR_ERR BIT(28)
+# define DSI_RXPKT1H_INCOMP_PKT BIT(25)
+# define DSI_RXPKT1H_PKT_TYPE_LONG BIT(24)
+/* Byte count if DSI_RXPKT1H_PKT_TYPE_LONG */
+# define DSI_RXPKT1H_BC_PARAM_MASK VC4_MASK(23, 8)
+# define DSI_RXPKT1H_BC_PARAM_SHIFT 8
+/* Short return bytes if !DSI_RXPKT1H_PKT_TYPE_LONG */
+# define DSI_RXPKT1H_SHORT_1_MASK VC4_MASK(23, 16)
+# define DSI_RXPKT1H_SHORT_1_SHIFT 16
+# define DSI_RXPKT1H_SHORT_0_MASK VC4_MASK(15, 8)
+# define DSI_RXPKT1H_SHORT_0_SHIFT 8
+# define DSI_RXPKT1H_DT_LP_CMD_MASK VC4_MASK(7, 0)
+# define DSI_RXPKT1H_DT_LP_CMD_SHIFT 0
+
+#define DSI0_RXPKT2H 0x10 /* AKA RX2_PKTH */
+#define DSI1_RXPKT2H 0x18
+# define DSI_RXPKT2H_DET_ERR BIT(30)
+# define DSI_RXPKT2H_ECC_ERR BIT(29)
+# define DSI_RXPKT2H_COR_ERR BIT(28)
+# define DSI_RXPKT2H_INCOMP_PKT BIT(25)
+# define DSI_RXPKT2H_BC_PARAM_MASK VC4_MASK(23, 8)
+# define DSI_RXPKT2H_BC_PARAM_SHIFT 8
+# define DSI_RXPKT2H_DT_MASK VC4_MASK(7, 0)
+# define DSI_RXPKT2H_DT_SHIFT 0
+
+#define DSI0_TXPKT_CMD_FIFO 0x14 /* AKA CMD_DATAF */
+#define DSI1_TXPKT_CMD_FIFO 0x1c
+
+#define DSI0_DISP0_CTRL 0x18
+# define DSI_DISP0_PIX_CLK_DIV_MASK VC4_MASK(21, 13)
+# define DSI_DISP0_PIX_CLK_DIV_SHIFT 13
+# define DSI_DISP0_LP_STOP_CTRL_MASK VC4_MASK(12, 11)
+# define DSI_DISP0_LP_STOP_CTRL_SHIFT 11
+# define DSI_DISP0_LP_STOP_DISABLE 0
+# define DSI_DISP0_LP_STOP_PERLINE 1
+# define DSI_DISP0_LP_STOP_PERFRAME 2
+
+/* Transmit RGB pixels and null packets only during HACTIVE, instead
+ * of going to LP-STOP.
+ */
+# define DSI_DISP_HACTIVE_NULL BIT(10)
+/* Transmit blanking packet only during vblank, instead of allowing LP-STOP. */
+# define DSI_DISP_VBLP_CTRL BIT(9)
+/* Transmit blanking packet only during HFP, instead of allowing LP-STOP. */
+# define DSI_DISP_HFP_CTRL BIT(8)
+/* Transmit blanking packet only during HBP, instead of allowing LP-STOP. */
+# define DSI_DISP_HBP_CTRL BIT(7)
+# define DSI_DISP0_CHANNEL_MASK VC4_MASK(6, 5)
+# define DSI_DISP0_CHANNEL_SHIFT 5
+/* Enables end events for HSYNC/VSYNC, not just start events. */
+# define DSI_DISP0_ST_END BIT(4)
+# define DSI_DISP0_PFORMAT_MASK VC4_MASK(3, 2)
+# define DSI_DISP0_PFORMAT_SHIFT 2
+# define DSI_PFORMAT_RGB565 0
+# define DSI_PFORMAT_RGB666_PACKED 1
+# define DSI_PFORMAT_RGB666 2
+# define DSI_PFORMAT_RGB888 3
+/* Default is VIDEO mode. */
+# define DSI_DISP0_COMMAND_MODE BIT(1)
+# define DSI_DISP0_ENABLE BIT(0)
+
+#define DSI0_DISP1_CTRL 0x1c
+#define DSI1_DISP1_CTRL 0x2c
+/* Format of the data written to TXPKT_PIX_FIFO. */
+# define DSI_DISP1_PFORMAT_MASK VC4_MASK(2, 1)
+# define DSI_DISP1_PFORMAT_SHIFT 1
+# define DSI_DISP1_PFORMAT_16BIT 0
+# define DSI_DISP1_PFORMAT_24BIT 1
+# define DSI_DISP1_PFORMAT_32BIT_LE 2
+# define DSI_DISP1_PFORMAT_32BIT_BE 3
+
+/* DISP1 is always command mode. */
+# define DSI_DISP1_ENABLE BIT(0)
+
+#define DSI0_TXPKT_PIX_FIFO 0x20 /* AKA PIX_FIFO */
+
+#define DSI0_INT_STAT 0x24
+#define DSI0_INT_EN 0x28
+# define DSI1_INT_PHY_D3_ULPS BIT(30)
+# define DSI1_INT_PHY_D3_STOP BIT(29)
+# define DSI1_INT_PHY_D2_ULPS BIT(28)
+# define DSI1_INT_PHY_D2_STOP BIT(27)
+# define DSI1_INT_PHY_D1_ULPS BIT(26)
+# define DSI1_INT_PHY_D1_STOP BIT(25)
+# define DSI1_INT_PHY_D0_ULPS BIT(24)
+# define DSI1_INT_PHY_D0_STOP BIT(23)
+# define DSI1_INT_FIFO_ERR BIT(22)
+# define DSI1_INT_PHY_DIR_RTF BIT(21)
+# define DSI1_INT_PHY_RXLPDT BIT(20)
+# define DSI1_INT_PHY_RXTRIG BIT(19)
+# define DSI1_INT_PHY_D0_LPDT BIT(18)
+# define DSI1_INT_PHY_DIR_FTR BIT(17)
+
+/* Signaled when the clock lane enters the given state. */
+# define DSI1_INT_PHY_CLOCK_ULPS BIT(16)
+# define DSI1_INT_PHY_CLOCK_HS BIT(15)
+# define DSI1_INT_PHY_CLOCK_STOP BIT(14)
+
+/* Signaled on timeouts */
+# define DSI1_INT_PR_TO BIT(13)
+# define DSI1_INT_TA_TO BIT(12)
+# define DSI1_INT_LPRX_TO BIT(11)
+# define DSI1_INT_HSTX_TO BIT(10)
+
+/* Contention on a line when trying to drive the line low */
+# define DSI1_INT_ERR_CONT_LP1 BIT(9)
+# define DSI1_INT_ERR_CONT_LP0 BIT(8)
+
+/* Control error: incorrect line state sequence on data lane 0. */
+# define DSI1_INT_ERR_CONTROL BIT(7)
+/* LPDT synchronization error (bits received not a multiple of 8). */
+# define DSI1_INT_ERR_SYNC_ESC BIT(6)
+/* Signaled after receiving an error packet from the display in
+ * response to a read.
+ */
+# define DSI1_INT_RXPKT2 BIT(5)
+/* Signaled after receiving a packet. The header and optional short
+ * response will be in RXPKT1H, and a long response will be in the
+ * RXPKT_FIFO.
+ */
+# define DSI1_INT_RXPKT1 BIT(4)
+# define DSI1_INT_TXPKT2_DONE BIT(3)
+# define DSI1_INT_TXPKT2_END BIT(2)
+/* Signaled after all repeats of TXPKT1 are transferred. */
+# define DSI1_INT_TXPKT1_DONE BIT(1)
+/* Signaled after each TXPKT1 repeat is scheduled. */
+# define DSI1_INT_TXPKT1_END BIT(0)
+
+#define DSI1_INTERRUPTS_ALWAYS_ENABLED (DSI1_INT_ERR_SYNC_ESC | \
+ DSI1_INT_ERR_CONTROL | \
+ DSI1_INT_ERR_CONT_LP0 | \
+ DSI1_INT_ERR_CONT_LP1 | \
+ DSI1_INT_HSTX_TO | \
+ DSI1_INT_LPRX_TO | \
+ DSI1_INT_TA_TO | \
+ DSI1_INT_PR_TO)
+
+#define DSI0_STAT 0x2c
+#define DSI0_HSTX_TO_CNT 0x30
+#define DSI0_LPRX_TO_CNT 0x34
+#define DSI0_TA_TO_CNT 0x38
+#define DSI0_PR_TO_CNT 0x3c
+#define DSI0_PHYC 0x40
+# define DSI1_PHYC_ESC_CLK_LPDT_MASK VC4_MASK(25, 20)
+# define DSI1_PHYC_ESC_CLK_LPDT_SHIFT 20
+# define DSI1_PHYC_HS_CLK_CONTINUOUS BIT(18)
+# define DSI0_PHYC_ESC_CLK_LPDT_MASK VC4_MASK(17, 12)
+# define DSI0_PHYC_ESC_CLK_LPDT_SHIFT 12
+# define DSI1_PHYC_CLANE_ULPS BIT(17)
+# define DSI1_PHYC_CLANE_ENABLE BIT(16)
+# define DSI_PHYC_DLANE3_ULPS BIT(13)
+# define DSI_PHYC_DLANE3_ENABLE BIT(12)
+# define DSI0_PHYC_HS_CLK_CONTINUOUS BIT(10)
+# define DSI0_PHYC_CLANE_ULPS BIT(9)
+# define DSI_PHYC_DLANE2_ULPS BIT(9)
+# define DSI0_PHYC_CLANE_ENABLE BIT(8)
+# define DSI_PHYC_DLANE2_ENABLE BIT(8)
+# define DSI_PHYC_DLANE1_ULPS BIT(5)
+# define DSI_PHYC_DLANE1_ENABLE BIT(4)
+# define DSI_PHYC_DLANE0_FORCE_STOP BIT(2)
+# define DSI_PHYC_DLANE0_ULPS BIT(1)
+# define DSI_PHYC_DLANE0_ENABLE BIT(0)
+
+#define DSI0_HS_CLT0 0x44
+#define DSI0_HS_CLT1 0x48
+#define DSI0_HS_CLT2 0x4c
+#define DSI0_HS_DLT3 0x50
+#define DSI0_HS_DLT4 0x54
+#define DSI0_HS_DLT5 0x58
+#define DSI0_HS_DLT6 0x5c
+#define DSI0_HS_DLT7 0x60
+
+#define DSI0_PHY_AFEC0 0x64
+# define DSI0_PHY_AFEC0_DDR2CLK_EN BIT(26)
+# define DSI0_PHY_AFEC0_DDRCLK_EN BIT(25)
+# define DSI0_PHY_AFEC0_LATCH_ULPS BIT(24)
+# define DSI1_PHY_AFEC0_IDR_DLANE3_MASK VC4_MASK(31, 29)
+# define DSI1_PHY_AFEC0_IDR_DLANE3_SHIFT 29
+# define DSI1_PHY_AFEC0_IDR_DLANE2_MASK VC4_MASK(28, 26)
+# define DSI1_PHY_AFEC0_IDR_DLANE2_SHIFT 26
+# define DSI1_PHY_AFEC0_IDR_DLANE1_MASK VC4_MASK(25, 23)
+# define DSI1_PHY_AFEC0_IDR_DLANE1_SHIFT 23
+# define DSI1_PHY_AFEC0_IDR_DLANE0_MASK VC4_MASK(22, 20)
+# define DSI1_PHY_AFEC0_IDR_DLANE0_SHIFT 20
+# define DSI1_PHY_AFEC0_IDR_CLANE_MASK VC4_MASK(19, 17)
+# define DSI1_PHY_AFEC0_IDR_CLANE_SHIFT 17
+# define DSI0_PHY_AFEC0_ACTRL_DLANE1_MASK VC4_MASK(23, 20)
+# define DSI0_PHY_AFEC0_ACTRL_DLANE1_SHIFT 20
+# define DSI0_PHY_AFEC0_ACTRL_DLANE0_MASK VC4_MASK(19, 16)
+# define DSI0_PHY_AFEC0_ACTRL_DLANE0_SHIFT 16
+# define DSI0_PHY_AFEC0_ACTRL_CLANE_MASK VC4_MASK(15, 12)
+# define DSI0_PHY_AFEC0_ACTRL_CLANE_SHIFT 12
+# define DSI1_PHY_AFEC0_DDR2CLK_EN BIT(16)
+# define DSI1_PHY_AFEC0_DDRCLK_EN BIT(15)
+# define DSI1_PHY_AFEC0_LATCH_ULPS BIT(14)
+# define DSI1_PHY_AFEC0_RESET BIT(13)
+# define DSI1_PHY_AFEC0_PD BIT(12)
+# define DSI0_PHY_AFEC0_RESET BIT(11)
+# define DSI1_PHY_AFEC0_PD_BG BIT(11)
+# define DSI0_PHY_AFEC0_PD BIT(10)
+# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(10)
+# define DSI0_PHY_AFEC0_PD_BG BIT(9)
+# define DSI1_PHY_AFEC0_PD_DLANE2 BIT(9)
+# define DSI0_PHY_AFEC0_PD_DLANE1 BIT(8)
+# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(8)
+# define DSI_PHY_AFEC0_PTATADJ_MASK VC4_MASK(7, 4)
+# define DSI_PHY_AFEC0_PTATADJ_SHIFT 4
+# define DSI_PHY_AFEC0_CTATADJ_MASK VC4_MASK(3, 0)
+# define DSI_PHY_AFEC0_CTATADJ_SHIFT 0
+
+#define DSI0_PHY_AFEC1 0x68
+# define DSI0_PHY_AFEC1_IDR_DLANE1_MASK VC4_MASK(10, 8)
+# define DSI0_PHY_AFEC1_IDR_DLANE1_SHIFT 8
+# define DSI0_PHY_AFEC1_IDR_DLANE0_MASK VC4_MASK(6, 4)
+# define DSI0_PHY_AFEC1_IDR_DLANE0_SHIFT 4
+# define DSI0_PHY_AFEC1_IDR_CLANE_MASK VC4_MASK(2, 0)
+# define DSI0_PHY_AFEC1_IDR_CLANE_SHIFT 0
+
+#define DSI0_TST_SEL 0x6c
+#define DSI0_TST_MON 0x70
+#define DSI0_ID 0x74
+# define DSI_ID_VALUE 0x00647369
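+/* 0x64, 0x73, 0x69 == "dsi" in ASCII. */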
+
+#define DSI1_CTRL 0x00
+# define DSI_CTRL_HS_CLKC_MASK VC4_MASK(15, 14)
+# define DSI_CTRL_HS_CLKC_SHIFT 14
+# define DSI_CTRL_HS_CLKC_BYTE 0
+# define DSI_CTRL_HS_CLKC_DDR2 1
+# define DSI_CTRL_HS_CLKC_DDR 2
+
+# define DSI_CTRL_RX_LPDT_EOT_DISABLE BIT(13)
+# define DSI_CTRL_LPDT_EOT_DISABLE BIT(12)
+# define DSI_CTRL_HSDT_EOT_DISABLE BIT(11)
+# define DSI_CTRL_SOFT_RESET_CFG BIT(10)
+# define DSI_CTRL_CAL_BYTE BIT(9)
+# define DSI_CTRL_INV_BYTE BIT(8)
+# define DSI_CTRL_CLR_LDF BIT(7)
+# define DSI0_CTRL_CLR_PBCF BIT(6)
+# define DSI1_CTRL_CLR_RXF BIT(6)
+# define DSI0_CTRL_CLR_CPBCF BIT(5)
+# define DSI1_CTRL_CLR_PDF BIT(5)
+# define DSI0_CTRL_CLR_PDF BIT(4)
+# define DSI1_CTRL_CLR_CDF BIT(4)
+# define DSI0_CTRL_CLR_CDF BIT(3)
+# define DSI0_CTRL_CTRL2 BIT(2)
+# define DSI1_CTRL_DISABLE_DISP_CRCC BIT(2)
+# define DSI0_CTRL_CTRL1 BIT(1)
+# define DSI1_CTRL_DISABLE_DISP_ECCC BIT(1)
+# define DSI0_CTRL_CTRL0 BIT(0)
+# define DSI1_CTRL_EN BIT(0)
+# define DSI0_CTRL_RESET_FIFOS (DSI_CTRL_CLR_LDF | \
+ DSI0_CTRL_CLR_PBCF | \
+ DSI0_CTRL_CLR_CPBCF | \
+ DSI0_CTRL_CLR_PDF | \
+ DSI0_CTRL_CLR_CDF)
+# define DSI1_CTRL_RESET_FIFOS (DSI_CTRL_CLR_LDF | \
+ DSI1_CTRL_CLR_RXF | \
+ DSI1_CTRL_CLR_PDF | \
+ DSI1_CTRL_CLR_CDF)
+
+#define DSI1_TXPKT2C 0x0c
+#define DSI1_TXPKT2H 0x10
+#define DSI1_TXPKT_PIX_FIFO 0x20
+#define DSI1_RXPKT_FIFO 0x24
+#define DSI1_DISP0_CTRL 0x28
+#define DSI1_INT_STAT 0x30
+#define DSI1_INT_EN 0x34
+/* State reporting bits. These mostly behave like INT_STAT, where
+ * writing a 1 clears the bit.
+ */
+#define DSI1_STAT 0x38
+# define DSI1_STAT_PHY_D3_ULPS BIT(31)
+# define DSI1_STAT_PHY_D3_STOP BIT(30)
+# define DSI1_STAT_PHY_D2_ULPS BIT(29)
+# define DSI1_STAT_PHY_D2_STOP BIT(28)
+# define DSI1_STAT_PHY_D1_ULPS BIT(27)
+# define DSI1_STAT_PHY_D1_STOP BIT(26)
+# define DSI1_STAT_PHY_D0_ULPS BIT(25)
+# define DSI1_STAT_PHY_D0_STOP BIT(24)
+# define DSI1_STAT_FIFO_ERR BIT(23)
+# define DSI1_STAT_PHY_RXLPDT BIT(22)
+# define DSI1_STAT_PHY_RXTRIG BIT(21)
+# define DSI1_STAT_PHY_D0_LPDT BIT(20)
+/* Set when in forward direction */
+# define DSI1_STAT_PHY_DIR BIT(19)
+# define DSI1_STAT_PHY_CLOCK_ULPS BIT(18)
+# define DSI1_STAT_PHY_CLOCK_HS BIT(17)
+# define DSI1_STAT_PHY_CLOCK_STOP BIT(16)
+# define DSI1_STAT_PR_TO BIT(15)
+# define DSI1_STAT_TA_TO BIT(14)
+# define DSI1_STAT_LPRX_TO BIT(13)
+# define DSI1_STAT_HSTX_TO BIT(12)
+# define DSI1_STAT_ERR_CONT_LP1 BIT(11)
+# define DSI1_STAT_ERR_CONT_LP0 BIT(10)
+# define DSI1_STAT_ERR_CONTROL BIT(9)
+# define DSI1_STAT_ERR_SYNC_ESC BIT(8)
+# define DSI1_STAT_RXPKT2 BIT(7)
+# define DSI1_STAT_RXPKT1 BIT(6)
+# define DSI1_STAT_TXPKT2_BUSY BIT(5)
+# define DSI1_STAT_TXPKT2_DONE BIT(4)
+# define DSI1_STAT_TXPKT2_END BIT(3)
+# define DSI1_STAT_TXPKT1_BUSY BIT(2)
+# define DSI1_STAT_TXPKT1_DONE BIT(1)
+# define DSI1_STAT_TXPKT1_END BIT(0)
+
+#define DSI1_HSTX_TO_CNT 0x3c
+#define DSI1_LPRX_TO_CNT 0x40
+#define DSI1_TA_TO_CNT 0x44
+#define DSI1_PR_TO_CNT 0x48
+#define DSI1_PHYC 0x4c
+
+#define DSI1_HS_CLT0 0x50
+# define DSI_HS_CLT0_CZERO_MASK VC4_MASK(26, 18)
+# define DSI_HS_CLT0_CZERO_SHIFT 18
+# define DSI_HS_CLT0_CPRE_MASK VC4_MASK(17, 9)
+# define DSI_HS_CLT0_CPRE_SHIFT 9
+# define DSI_HS_CLT0_CPREP_MASK VC4_MASK(8, 0)
+# define DSI_HS_CLT0_CPREP_SHIFT 0
+
+#define DSI1_HS_CLT1 0x54
+# define DSI_HS_CLT1_CTRAIL_MASK VC4_MASK(17, 9)
+# define DSI_HS_CLT1_CTRAIL_SHIFT 9
+# define DSI_HS_CLT1_CPOST_MASK VC4_MASK(8, 0)
+# define DSI_HS_CLT1_CPOST_SHIFT 0
+
+#define DSI1_HS_CLT2 0x58
+# define DSI_HS_CLT2_WUP_MASK VC4_MASK(23, 0)
+# define DSI_HS_CLT2_WUP_SHIFT 0
+
+#define DSI1_HS_DLT3 0x5c
+# define DSI_HS_DLT3_EXIT_MASK VC4_MASK(26, 18)
+# define DSI_HS_DLT3_EXIT_SHIFT 18
+# define DSI_HS_DLT3_ZERO_MASK VC4_MASK(17, 9)
+# define DSI_HS_DLT3_ZERO_SHIFT 9
+# define DSI_HS_DLT3_PRE_MASK VC4_MASK(8, 0)
+# define DSI_HS_DLT3_PRE_SHIFT 0
+
+#define DSI1_HS_DLT4 0x60
+# define DSI_HS_DLT4_ANLAT_MASK VC4_MASK(22, 18)
+# define DSI_HS_DLT4_ANLAT_SHIFT 18
+# define DSI_HS_DLT4_TRAIL_MASK VC4_MASK(17, 9)
+# define DSI_HS_DLT4_TRAIL_SHIFT 9
+# define DSI_HS_DLT4_LPX_MASK VC4_MASK(8, 0)
+# define DSI_HS_DLT4_LPX_SHIFT 0
+
+#define DSI1_HS_DLT5 0x64
+# define DSI_HS_DLT5_INIT_MASK VC4_MASK(23, 0)
+# define DSI_HS_DLT5_INIT_SHIFT 0
+
+#define DSI1_HS_DLT6 0x68
+# define DSI_HS_DLT6_TA_GET_MASK VC4_MASK(31, 24)
+# define DSI_HS_DLT6_TA_GET_SHIFT 24
+# define DSI_HS_DLT6_TA_SURE_MASK VC4_MASK(23, 16)
+# define DSI_HS_DLT6_TA_SURE_SHIFT 16
+# define DSI_HS_DLT6_TA_GO_MASK VC4_MASK(15, 8)
+# define DSI_HS_DLT6_TA_GO_SHIFT 8
+# define DSI_HS_DLT6_LP_LPX_MASK VC4_MASK(7, 0)
+# define DSI_HS_DLT6_LP_LPX_SHIFT 0
+
+#define DSI1_HS_DLT7 0x6c
+# define DSI_HS_DLT7_LP_WUP_MASK VC4_MASK(23, 0)
+# define DSI_HS_DLT7_LP_WUP_SHIFT 0
+
+#define DSI1_PHY_AFEC0 0x70
+
+#define DSI1_PHY_AFEC1 0x74
+# define DSI1_PHY_AFEC1_ACTRL_DLANE3_MASK VC4_MASK(19, 16)
+# define DSI1_PHY_AFEC1_ACTRL_DLANE3_SHIFT 16
+# define DSI1_PHY_AFEC1_ACTRL_DLANE2_MASK VC4_MASK(15, 12)
+# define DSI1_PHY_AFEC1_ACTRL_DLANE2_SHIFT 12
+# define DSI1_PHY_AFEC1_ACTRL_DLANE1_MASK VC4_MASK(11, 8)
+# define DSI1_PHY_AFEC1_ACTRL_DLANE1_SHIFT 8
+# define DSI1_PHY_AFEC1_ACTRL_DLANE0_MASK VC4_MASK(7, 4)
+# define DSI1_PHY_AFEC1_ACTRL_DLANE0_SHIFT 4
+# define DSI1_PHY_AFEC1_ACTRL_CLANE_MASK VC4_MASK(3, 0)
+# define DSI1_PHY_AFEC1_ACTRL_CLANE_SHIFT 0
+
+#define DSI1_TST_SEL 0x78
+#define DSI1_TST_MON 0x7c
+#define DSI1_PHY_TST1 0x80
+#define DSI1_PHY_TST2 0x84
+#define DSI1_PHY_FIFO_STAT 0x88
+/* Actually, all registers in the range that aren't otherwise claimed
+ * will return the ID.
+ */
+#define DSI1_ID 0x8c
+
+/* General DSI hardware state. */
+struct vc4_dsi {
+ struct platform_device *pdev;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_panel *panel;
+
+ void __iomem *regs;
+
+ struct dma_chan *reg_dma_chan;
+ dma_addr_t reg_dma_paddr;
+ u32 *reg_dma_mem;
+ dma_addr_t reg_paddr;
+
+ /* Whether we're on bcm2835's DSI0 or DSI1. */
+ int port;
+
+ /* DSI channel for the panel we're connected to. */
+ u32 channel;
+ u32 lanes;
+ enum mipi_dsi_pixel_format format;
+ u32 mode_flags;
+
+ /* Input clock from CPRMAN to the digital PHY, for the DSI
+ * escape clock.
+ */
+ struct clk *escape_clock;
+
+ /* Input clock to the analog PHY, used to generate the DSI bit
+ * clock.
+ */
+ struct clk *pll_phy_clock;
+
+ /* HS Clocks generated within the DSI analog PHY. */
+ struct clk_fixed_factor phy_clocks[3];
+
+ struct clk_hw_onecell_data *clk_onecell;
+
+ /* Pixel clock output to the pixelvalve, generated from the HS
+ * clock.
+ */
+ struct clk *pixel_clock;
+
+ struct completion xfer_completion;
+ int xfer_result;
+};
+
+#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
+
+static inline void
+dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
+{
+ struct dma_chan *chan = dsi->reg_dma_chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ int ret;
+
+ /* DSI0 should be able to write normally. */
+ if (!chan) {
+ writel(val, dsi->regs + offset);
+ return;
+ }
+
+ *dsi->reg_dma_mem = val;
+
+ tx = chan->device->device_prep_dma_memcpy(chan,
+ dsi->reg_paddr + offset,
+ dsi->reg_dma_paddr,
+ 4, 0);
+ if (!tx) {
+ DRM_ERROR("Failed to set up DMA register write\n");
+ return;
+ }
+
+ cookie = tx->tx_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ DRM_ERROR("Failed to submit DMA: %d\n", ret);
+ return;
+ }
+ ret = dma_sync_wait(chan, cookie);
+ if (ret)
+ DRM_ERROR("Failed to wait for DMA: %d\n", ret);
+}
+
+#define DSI_READ(offset) readl(dsi->regs + (offset))
+#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
+#define DSI_PORT_READ(offset) \
+ DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
+#define DSI_PORT_WRITE(offset, val) \
+ DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
+#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
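+/* Illustration (not additional functionality): with dsi->port == 1,
+ * DSI_PORT_READ(CTRL) expands to DSI_READ(DSI1_CTRL), and
+ * DSI_PORT_WRITE(CTRL, val) becomes a DMA-workaround write to
+ * DSI1_CTRL; on port 0 the DSI0_* offsets are used and the write is a
+ * plain writel().
+ */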
+
+/* VC4 DSI encoder KMS struct */
+struct vc4_dsi_encoder {
+ struct vc4_encoder base;
+ struct vc4_dsi *dsi;
+};
+
+static inline struct vc4_dsi_encoder *
+to_vc4_dsi_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_dsi_encoder, base.base);
+}
+
+/* VC4 DSI connector KMS struct */
+struct vc4_dsi_connector {
+ struct drm_connector base;
+ struct vc4_dsi *dsi;
+};
+
+static inline struct vc4_dsi_connector *
+to_vc4_dsi_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct vc4_dsi_connector, base);
+}
+
+#define DSI_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} dsi0_regs[] = {
+ DSI_REG(DSI0_CTRL),
+ DSI_REG(DSI0_STAT),
+ DSI_REG(DSI0_HSTX_TO_CNT),
+ DSI_REG(DSI0_LPRX_TO_CNT),
+ DSI_REG(DSI0_TA_TO_CNT),
+ DSI_REG(DSI0_PR_TO_CNT),
+ DSI_REG(DSI0_DISP0_CTRL),
+ DSI_REG(DSI0_DISP1_CTRL),
+ DSI_REG(DSI0_INT_STAT),
+ DSI_REG(DSI0_INT_EN),
+ DSI_REG(DSI0_PHYC),
+ DSI_REG(DSI0_HS_CLT0),
+ DSI_REG(DSI0_HS_CLT1),
+ DSI_REG(DSI0_HS_CLT2),
+ DSI_REG(DSI0_HS_DLT3),
+ DSI_REG(DSI0_HS_DLT4),
+ DSI_REG(DSI0_HS_DLT5),
+ DSI_REG(DSI0_HS_DLT6),
+ DSI_REG(DSI0_HS_DLT7),
+ DSI_REG(DSI0_PHY_AFEC0),
+ DSI_REG(DSI0_PHY_AFEC1),
+ DSI_REG(DSI0_ID),
+};
+
+static const struct {
+ u32 reg;
+ const char *name;
+} dsi1_regs[] = {
+ DSI_REG(DSI1_CTRL),
+ DSI_REG(DSI1_STAT),
+ DSI_REG(DSI1_HSTX_TO_CNT),
+ DSI_REG(DSI1_LPRX_TO_CNT),
+ DSI_REG(DSI1_TA_TO_CNT),
+ DSI_REG(DSI1_PR_TO_CNT),
+ DSI_REG(DSI1_DISP0_CTRL),
+ DSI_REG(DSI1_DISP1_CTRL),
+ DSI_REG(DSI1_INT_STAT),
+ DSI_REG(DSI1_INT_EN),
+ DSI_REG(DSI1_PHYC),
+ DSI_REG(DSI1_HS_CLT0),
+ DSI_REG(DSI1_HS_CLT1),
+ DSI_REG(DSI1_HS_CLT2),
+ DSI_REG(DSI1_HS_DLT3),
+ DSI_REG(DSI1_HS_DLT4),
+ DSI_REG(DSI1_HS_DLT5),
+ DSI_REG(DSI1_HS_DLT6),
+ DSI_REG(DSI1_HS_DLT7),
+ DSI_REG(DSI1_PHY_AFEC0),
+ DSI_REG(DSI1_PHY_AFEC1),
+ DSI_REG(DSI1_ID),
+};
+
+static void vc4_dsi_dump_regs(struct vc4_dsi *dsi)
+{
+ int i;
+
+ if (dsi->port == 0) {
+ for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
+ DRM_INFO("0x%04x (%s): 0x%08x\n",
+ dsi0_regs[i].reg, dsi0_regs[i].name,
+ DSI_READ(dsi0_regs[i].reg));
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
+ DRM_INFO("0x%04x (%s): 0x%08x\n",
+ dsi1_regs[i].reg, dsi1_regs[i].name,
+ DSI_READ(dsi1_regs[i].reg));
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ int dsi_index = (uintptr_t)node->info_ent->data;
+ struct vc4_dsi *dsi = (dsi_index == 1 ? vc4->dsi1 : NULL);
+ int i;
+
+ if (!dsi)
+ return 0;
+
+ if (dsi->port == 0) {
+ for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
+ seq_printf(m, "0x%04x (%s): 0x%08x\n",
+ dsi0_regs[i].reg, dsi0_regs[i].name,
+ DSI_READ(dsi0_regs[i].reg));
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
+ seq_printf(m, "0x%04x (%s): 0x%08x\n",
+ dsi1_regs[i].reg, dsi1_regs[i].name,
+ DSI_READ(dsi1_regs[i].reg));
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static enum drm_connector_status
+vc4_dsi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct vc4_dsi_connector *vc4_connector =
+ to_vc4_dsi_connector(connector);
+ struct vc4_dsi *dsi = vc4_connector->dsi;
+
+ if (dsi->panel)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void vc4_dsi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static int vc4_dsi_connector_get_modes(struct drm_connector *connector)
+{
+ struct vc4_dsi_connector *vc4_connector =
+ to_vc4_dsi_connector(connector);
+ struct vc4_dsi *dsi = vc4_connector->dsi;
+
+ if (dsi->panel)
+ return drm_panel_get_modes(dsi->panel);
+
+ return 0;
+}
+
+static const struct drm_connector_funcs vc4_dsi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = vc4_dsi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vc4_dsi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs vc4_dsi_connector_helper_funcs = {
+ .get_modes = vc4_dsi_connector_get_modes,
+};
+
+static struct drm_connector *vc4_dsi_connector_init(struct drm_device *dev,
+ struct vc4_dsi *dsi)
+{
+ struct drm_connector *connector = NULL;
+ struct vc4_dsi_connector *dsi_connector;
+ int ret = 0;
+
+ dsi_connector = devm_kzalloc(dev->dev, sizeof(*dsi_connector),
+ GFP_KERNEL);
+ if (!dsi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ connector = &dsi_connector->base;
+
+ dsi_connector->dsi = dsi;
+
+ drm_connector_init(dev, connector, &vc4_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(connector, &vc4_dsi_connector_helper_funcs);
+
+ connector->polled = 0;
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_mode_connector_attach_encoder(connector, dsi->encoder);
+
+ return connector;
+
+fail:
+ if (connector)
+ vc4_dsi_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
+
+static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
+ .destroy = vc4_dsi_encoder_destroy,
+};
+
+static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
+{
+ u32 afec0 = DSI_PORT_READ(PHY_AFEC0);
+
+ if (latch)
+ afec0 |= DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS);
+ else
+ afec0 &= ~DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS);
+
+ DSI_PORT_WRITE(PHY_AFEC0, afec0);
+}
+
+/* Enters or exits Ultra Low Power State. */
+static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps)
+{
+ bool continuous = dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ u32 phyc_ulps = ((continuous ? DSI_PORT_BIT(PHYC_CLANE_ULPS) : 0) |
+ DSI_PHYC_DLANE0_ULPS |
+ (dsi->lanes > 1 ? DSI_PHYC_DLANE1_ULPS : 0) |
+ (dsi->lanes > 2 ? DSI_PHYC_DLANE2_ULPS : 0) |
+ (dsi->lanes > 3 ? DSI_PHYC_DLANE3_ULPS : 0));
+ u32 stat_ulps = ((continuous ? DSI1_STAT_PHY_CLOCK_ULPS : 0) |
+ DSI1_STAT_PHY_D0_ULPS |
+ (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_ULPS : 0) |
+ (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_ULPS : 0) |
+ (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_ULPS : 0));
+ u32 stat_stop = ((continuous ? DSI1_STAT_PHY_CLOCK_STOP : 0) |
+ DSI1_STAT_PHY_D0_STOP |
+ (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_STOP : 0) |
+ (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) |
+ (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_STOP : 0));
+ int ret;
+
+ DSI_PORT_WRITE(STAT, stat_ulps);
+ DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) | phyc_ulps);
+ ret = wait_for((DSI_PORT_READ(STAT) & stat_ulps) == stat_ulps, 200);
+ if (ret) {
+ dev_warn(&dsi->pdev->dev,
+ "Timeout waiting for DSI ULPS entry: STAT 0x%08x",
+ DSI_PORT_READ(STAT));
+ DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
+ vc4_dsi_latch_ulps(dsi, false);
+ return;
+ }
+
+ /* The DSI module can't be disabled while the module is
+ * generating ULPS state. So, to be able to disable the
+ * module, we have the AFE latch the ULPS state and continue
+ * on to having the module enter STOP.
+ */
+ vc4_dsi_latch_ulps(dsi, ulps);
+
+ DSI_PORT_WRITE(STAT, stat_stop);
+ DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
+ ret = wait_for((DSI_PORT_READ(STAT) & stat_stop) == stat_stop, 200);
+ if (ret) {
+ dev_warn(&dsi->pdev->dev,
+ "Timeout waiting for DSI STOP entry: STAT 0x%08x",
+ DSI_PORT_READ(STAT));
+ DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
+ return;
+ }
+}
+
+static u32
+dsi_hs_timing(u32 ui_ns, u32 ns, u32 ui)
+{
+ /* The HS timings have to be rounded up to a multiple of 8
+ * because we're using the byte clock.
+ */
+ return roundup(ui + DIV_ROUND_UP(ns, ui_ns), 8);
+}
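+/* Worked example (numbers assumed): with ui_ns = 2 (see the ui_ns
+ * calculation in vc4_dsi_encoder_enable()), the 262 ns T_CLK-ZERO
+ * used below becomes roundup(0 + DIV_ROUND_UP(262, 2), 8) =
+ * roundup(131, 8) = 136 unit intervals.
+ */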
+
+/* ESC always runs at 100 MHz. */
+#define ESC_TIME_NS 10
+
+static u32
+dsi_esc_timing(u32 ns)
+{
+ return DIV_ROUND_UP(ns, ESC_TIME_NS);
+}
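+/* e.g. the 60 ns minimum LPX period used below is
+ * DIV_ROUND_UP(60, 10) = 6 escape clock cycles.
+ */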
+
+static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
+ struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct device *dev = &dsi->pdev->dev;
+
+ drm_panel_disable(dsi->panel);
+
+ vc4_dsi_ulps(dsi, true);
+
+ drm_panel_unprepare(dsi->panel);
+
+ clk_disable_unprepare(dsi->pll_phy_clock);
+ clk_disable_unprepare(dsi->escape_clock);
+ clk_disable_unprepare(dsi->pixel_clock);
+
+ pm_runtime_put(dev);
+}
+
+static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_display_mode *mode = &encoder->crtc->mode;
+ struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
+ struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct device *dev = &dsi->pdev->dev;
+ u32 format = 0, divider = 0;
+ bool debug_dump_regs = false;
+ unsigned long hs_clock;
+ u32 ui_ns;
+ /* Minimum LP state duration in escape clock cycles. */
+ u32 lpx = dsi_esc_timing(60);
+ unsigned long pixel_clock_hz = mode->clock * 1000;
+ unsigned long dsip_clock;
+ unsigned long phy_clock;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ /* A positive return just means the device was already resumed. */
+ if (ret < 0) {
+ DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
+ return;
+ }
+
+ ret = drm_panel_prepare(dsi->panel);
+ if (ret) {
+ DRM_ERROR("Panel failed to prepare\n");
+ return;
+ }
+
+ if (debug_dump_regs) {
+ DRM_INFO("DSI regs before:\n");
+ vc4_dsi_dump_regs(dsi);
+ }
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ format = DSI_PFORMAT_RGB888;
+ divider = 24 / dsi->lanes;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ format = DSI_PFORMAT_RGB666;
+ divider = 24 / dsi->lanes;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ format = DSI_PFORMAT_RGB666_PACKED;
+ divider = 18 / dsi->lanes;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ format = DSI_PFORMAT_RGB565;
+ divider = 16 / dsi->lanes;
+ break;
+ }
+
+ phy_clock = pixel_clock_hz * divider;
+ ret = clk_set_rate(dsi->pll_phy_clock, phy_clock);
+ if (ret) {
+ dev_err(&dsi->pdev->dev,
+ "Failed to set phy clock to %ld: %d\n", phy_clock, ret);
+ }
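+ /* Worked example (numbers assumed): RGB888 over two lanes gives
+ * divider = 24 / 2 = 12, so a 30 MHz pixel clock requests a
+ * 360 MHz PHY PLL rate.
+ */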
+
+ /* Reset the DSI and all its fifos. */
+ DSI_PORT_WRITE(CTRL,
+ DSI_CTRL_SOFT_RESET_CFG |
+ DSI_PORT_BIT(CTRL_RESET_FIFOS));
+
+ DSI_PORT_WRITE(CTRL,
+ DSI_CTRL_HSDT_EOT_DISABLE |
+ DSI_CTRL_RX_LPDT_EOT_DISABLE);
+
+ /* Clear all stat bits so we see what has happened during enable. */
+ DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT));
+
+ /* Set AFE CTR00/CTR1 to release powerdown of analog. */
+ if (dsi->port == 0) {
+ u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
+ VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ));
+
+ if (dsi->lanes < 2)
+ afec0 |= DSI0_PHY_AFEC0_PD_DLANE1;
+
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO))
+ afec0 |= DSI0_PHY_AFEC0_RESET;
+
+ DSI_PORT_WRITE(PHY_AFEC0, afec0);
+
+ DSI_PORT_WRITE(PHY_AFEC1,
+ VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE1) |
+ VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE0) |
+ VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_CLANE));
+ } else {
+ u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
+ VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ) |
+ VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_CLANE) |
+ VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE0) |
+ VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE1) |
+ VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE2) |
+ VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE3));
+
+ if (dsi->lanes < 4)
+ afec0 |= DSI1_PHY_AFEC0_PD_DLANE3;
+ if (dsi->lanes < 3)
+ afec0 |= DSI1_PHY_AFEC0_PD_DLANE2;
+ if (dsi->lanes < 2)
+ afec0 |= DSI1_PHY_AFEC0_PD_DLANE1;
+
+ afec0 |= DSI1_PHY_AFEC0_RESET;
+
+ DSI_PORT_WRITE(PHY_AFEC0, afec0);
+
+ DSI_PORT_WRITE(PHY_AFEC1, 0);
+
+ /* AFEC reset hold time */
+ mdelay(1);
+ }
+
+ ret = clk_prepare_enable(dsi->escape_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on DSI escape clock: %d\n", ret);
+ return;
+ }
+
+ ret = clk_prepare_enable(dsi->pll_phy_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on DSI PLL: %d\n", ret);
+ return;
+ }
+
+ hs_clock = clk_get_rate(dsi->pll_phy_clock);
+
+ /* Yes, we set the DSI0P/DSI1P pixel clock to the byte rate,
+ * not the pixel clock rate. DSIxP take from the APHY's byte,
+ * DDR2, or DDR clock (we use byte) and feed into the PV at
+ * that rate. Separately, a value derived from PIX_CLK_DIV
+ * and HS_CLKC is fed into the PV to divide down to the actual
+ * pixel clock for pushing pixels into DSI.
+ */
+ dsip_clock = phy_clock / 8;
+ ret = clk_set_rate(dsi->pixel_clock, dsip_clock);
+ if (ret) {
+ dev_err(dev, "Failed to set pixel clock to %ldHz: %d\n",
+ dsip_clock, ret);
+ }
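+ /* e.g. continuing the example above, a 360 MHz PHY clock makes
+ * dsip_clock 360 / 8 = 45 MHz.
+ */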
+
+ ret = clk_prepare_enable(dsi->pixel_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on DSI pixel clock: %d\n", ret);
+ return;
+ }
+
+ /* How many ns one DSI unit interval is. Note that the clock
+ * is DDR, so there's an extra divide by 2.
+ */
+ ui_ns = DIV_ROUND_UP(500000000, hs_clock);
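+ /* e.g. hs_clock = 250 MHz gives ui_ns = DIV_ROUND_UP(500000000,
+ * 250000000) = 2 ns.
+ */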
+
+ DSI_PORT_WRITE(HS_CLT0,
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 262, 0),
+ DSI_HS_CLT0_CZERO) |
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 0, 8),
+ DSI_HS_CLT0_CPRE) |
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 38, 0),
+ DSI_HS_CLT0_CPREP));
+
+ DSI_PORT_WRITE(HS_CLT1,
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 60, 0),
+ DSI_HS_CLT1_CTRAIL) |
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 60, 52),
+ DSI_HS_CLT1_CPOST));
+
+ DSI_PORT_WRITE(HS_CLT2,
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000000, 0),
+ DSI_HS_CLT2_WUP));
+
+ DSI_PORT_WRITE(HS_DLT3,
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 100, 0),
+ DSI_HS_DLT3_EXIT) |
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 105, 6),
+ DSI_HS_DLT3_ZERO) |
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, 40, 4),
+ DSI_HS_DLT3_PRE));
+
+ DSI_PORT_WRITE(HS_DLT4,
+ VC4_SET_FIELD(dsi_hs_timing(ui_ns, lpx * ESC_TIME_NS, 0),
+ DSI_HS_DLT4_LPX) |
+ VC4_SET_FIELD(max(dsi_hs_timing(ui_ns, 0, 8),
+ dsi_hs_timing(ui_ns, 60, 4)),
+ DSI_HS_DLT4_TRAIL) |
+ VC4_SET_FIELD(0, DSI_HS_DLT4_ANLAT));
+
+ DSI_PORT_WRITE(HS_DLT5, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000, 5000),
+ DSI_HS_DLT5_INIT));
+
+ DSI_PORT_WRITE(HS_DLT6,
+ VC4_SET_FIELD(lpx * 5, DSI_HS_DLT6_TA_GET) |
+ VC4_SET_FIELD(lpx, DSI_HS_DLT6_TA_SURE) |
+ VC4_SET_FIELD(lpx * 4, DSI_HS_DLT6_TA_GO) |
+ VC4_SET_FIELD(lpx, DSI_HS_DLT6_LP_LPX));
+
+ DSI_PORT_WRITE(HS_DLT7,
+ VC4_SET_FIELD(dsi_esc_timing(1000000),
+ DSI_HS_DLT7_LP_WUP));
+
+ DSI_PORT_WRITE(PHYC,
+ DSI_PHYC_DLANE0_ENABLE |
+ (dsi->lanes >= 2 ? DSI_PHYC_DLANE1_ENABLE : 0) |
+ (dsi->lanes >= 3 ? DSI_PHYC_DLANE2_ENABLE : 0) |
+ (dsi->lanes >= 4 ? DSI_PHYC_DLANE3_ENABLE : 0) |
+ DSI_PORT_BIT(PHYC_CLANE_ENABLE) |
+ ((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ?
+ 0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) |
+ (dsi->port == 0 ?
+ VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) :
+ VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT)));
+
+ DSI_PORT_WRITE(CTRL,
+ DSI_PORT_READ(CTRL) |
+ DSI_CTRL_CAL_BYTE);
+
+ /* HS timeout in HS clock cycles: disabled. */
+ DSI_PORT_WRITE(HSTX_TO_CNT, 0);
+ /* LP receive timeout in HS clocks. */
+ DSI_PORT_WRITE(LPRX_TO_CNT, 0xffffff);
+ /* Bus turnaround timeout */
+ DSI_PORT_WRITE(TA_TO_CNT, 100000);
+ /* Display reset sequence timeout */
+ DSI_PORT_WRITE(PR_TO_CNT, 100000);
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ DSI_PORT_WRITE(DISP0_CTRL,
+ VC4_SET_FIELD(divider, DSI_DISP0_PIX_CLK_DIV) |
+ VC4_SET_FIELD(format, DSI_DISP0_PFORMAT) |
+ VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
+ DSI_DISP0_LP_STOP_CTRL) |
+ DSI_DISP0_ST_END |
+ DSI_DISP0_ENABLE);
+ } else {
+ DSI_PORT_WRITE(DISP0_CTRL,
+ DSI_DISP0_COMMAND_MODE |
+ DSI_DISP0_ENABLE);
+ }
+
+ /* Set up DISP1 for transferring long command payloads through
+ * the pixfifo.
+ */
+ DSI_PORT_WRITE(DISP1_CTRL,
+ VC4_SET_FIELD(DSI_DISP1_PFORMAT_32BIT_LE,
+ DSI_DISP1_PFORMAT) |
+ DSI_DISP1_ENABLE);
+
+ /* Ungate the block. */
+ if (dsi->port == 0)
+ DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0);
+ else
+ DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
+
+ /* Bring AFE out of reset (nothing to do here for DSI0). */
+ if (dsi->port == 1) {
+ DSI_PORT_WRITE(PHY_AFEC0,
+ DSI_PORT_READ(PHY_AFEC0) &
+ ~DSI1_PHY_AFEC0_RESET);
+ }
+
+ vc4_dsi_ulps(dsi, false);
+
+ if (debug_dump_regs) {
+ DRM_INFO("DSI regs after:\n");
+ vc4_dsi_dump_regs(dsi);
+ }
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret) {
+ DRM_ERROR("Panel failed to enable\n");
+ drm_panel_unprepare(dsi->panel);
+ return;
+ }
+}
+
+static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct vc4_dsi *dsi = host_to_dsi(host);
+ struct mipi_dsi_packet packet;
+ u32 pkth = 0, pktc = 0;
+ int i, ret;
+ bool is_long = mipi_dsi_packet_format_is_long(msg->type);
+ u32 cmd_fifo_len = 0, pix_fifo_len = 0;
+
+ mipi_dsi_create_packet(&packet, msg);
+
+ pkth |= VC4_SET_FIELD(packet.header[0], DSI_TXPKT1H_BC_DT);
+ pkth |= VC4_SET_FIELD(packet.header[1] |
+ (packet.header[2] << 8),
+ DSI_TXPKT1H_BC_PARAM);
+ if (is_long) {
+ /* Divide data across the various FIFOs we have available.
+ * The command FIFO takes byte-oriented data, but is of
+ * limited size. The pixel FIFO (never actually used for
+ * pixel data in reality) is word oriented, and substantially
+ * larger. So, we use the pixel FIFO for most of the data,
+ * sending the residual bytes in the command FIFO at the start.
+ *
+ * With this arrangement, the command FIFO will never get full.
+ */
+ if (packet.payload_length <= DSI_CMD_FIFO_DEPTH) {
+ cmd_fifo_len = packet.payload_length;
+ pix_fifo_len = 0;
+ } else {
+ cmd_fifo_len = (packet.payload_length %
+ DSI_PIX_FIFO_WIDTH);
+ pix_fifo_len = ((packet.payload_length - cmd_fifo_len) /
+ DSI_PIX_FIFO_WIDTH);
+ }
+
+ WARN_ON_ONCE(pix_fifo_len >= DSI_PIX_FIFO_DEPTH);
+
+ pkth |= VC4_SET_FIELD(cmd_fifo_len, DSI_TXPKT1H_BC_CMDFIFO);
+ }
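+ /* Worked example (payload assumed): a 22-byte long packet splits
+ * into cmd_fifo_len = 22 % 4 = 2 residual bytes for the command
+ * FIFO and pix_fifo_len = (22 - 2) / 4 = 5 words for the pixel
+ * FIFO.
+ */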
+
+ if (msg->rx_len) {
+ pktc |= VC4_SET_FIELD(DSI_TXPKT1C_CMD_CTRL_RX,
+ DSI_TXPKT1C_CMD_CTRL);
+ } else {
+ pktc |= VC4_SET_FIELD(DSI_TXPKT1C_CMD_CTRL_TX,
+ DSI_TXPKT1C_CMD_CTRL);
+ }
+
+ for (i = 0; i < cmd_fifo_len; i++)
+ DSI_PORT_WRITE(TXPKT_CMD_FIFO, packet.payload[i]);
+ for (i = 0; i < pix_fifo_len; i++) {
+ const u8 *pix = packet.payload + cmd_fifo_len + i * 4;
+
+ DSI_PORT_WRITE(TXPKT_PIX_FIFO,
+ pix[0] |
+ pix[1] << 8 |
+ pix[2] << 16 |
+ pix[3] << 24);
+ }
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ pktc |= DSI_TXPKT1C_CMD_MODE_LP;
+ if (is_long)
+ pktc |= DSI_TXPKT1C_CMD_TYPE_LONG;
+
+ /* Send one copy of the packet. Larger repeats are used for pixel
+ * data in command mode.
+ */
+ pktc |= VC4_SET_FIELD(1, DSI_TXPKT1C_CMD_REPEAT);
+
+ pktc |= DSI_TXPKT1C_CMD_EN;
+ if (pix_fifo_len) {
+ pktc |= VC4_SET_FIELD(DSI_TXPKT1C_DISPLAY_NO_SECONDARY,
+ DSI_TXPKT1C_DISPLAY_NO);
+ } else {
+ pktc |= VC4_SET_FIELD(DSI_TXPKT1C_DISPLAY_NO_SHORT,
+ DSI_TXPKT1C_DISPLAY_NO);
+ }
+
+ /* Enable the appropriate interrupt for the transfer completion. */
+ dsi->xfer_result = 0;
+ reinit_completion(&dsi->xfer_completion);
+ DSI_PORT_WRITE(INT_STAT, DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF);
+ if (msg->rx_len) {
+ DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+ DSI1_INT_PHY_DIR_RTF));
+ } else {
+ DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+ DSI1_INT_TXPKT1_DONE));
+ }
+
+ /* Send the packet. */
+ DSI_PORT_WRITE(TXPKT1H, pkth);
+ DSI_PORT_WRITE(TXPKT1C, pktc);
+
+ if (!wait_for_completion_timeout(&dsi->xfer_completion,
+ msecs_to_jiffies(1000))) {
+ dev_err(&dsi->pdev->dev, "transfer interrupt wait timeout");
+ dev_err(&dsi->pdev->dev, "instat: 0x%08x\n",
+ DSI_PORT_READ(INT_STAT));
+ ret = -ETIMEDOUT;
+ } else {
+ ret = dsi->xfer_result;
+ }
+
+ DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+
+ if (ret)
+ goto reset_fifo_and_return;
+
+ if (msg->rx_len) {
+ u32 rxpkt1h = DSI_PORT_READ(RXPKT1H);
+ u8 *msg_rx = msg->rx_buf;
+
+ if (rxpkt1h & DSI_RXPKT1H_PKT_TYPE_LONG) {
+ u32 rxlen = VC4_GET_FIELD(rxpkt1h,
+ DSI_RXPKT1H_BC_PARAM);
+
+ if (rxlen != msg->rx_len) {
+ DRM_ERROR("DSI returned %db, expecting %db\n",
+ rxlen, (int)msg->rx_len);
+ ret = -ENXIO;
+ goto reset_fifo_and_return;
+ }
+
+ for (i = 0; i < msg->rx_len; i++)
+ msg_rx[i] = DSI_READ(DSI1_RXPKT_FIFO);
+ } else {
+ /* FINISHME: Handle AWER */
+
+ msg_rx[0] = VC4_GET_FIELD(rxpkt1h,
+ DSI_RXPKT1H_SHORT_0);
+ if (msg->rx_len > 1) {
+ msg_rx[1] = VC4_GET_FIELD(rxpkt1h,
+ DSI_RXPKT1H_SHORT_1);
+ }
+ }
+ }
+
+ return ret;
+
+reset_fifo_and_return:
+ DRM_ERROR("DSI transfer failed, resetting: %d\n", ret);
+
+ DSI_PORT_WRITE(TXPKT1C, DSI_PORT_READ(TXPKT1C) & ~DSI_TXPKT1C_CMD_EN);
+ udelay(1);
+ DSI_PORT_WRITE(CTRL,
+ DSI_PORT_READ(CTRL) |
+ DSI_PORT_BIT(CTRL_RESET_FIFOS));
+
+ DSI_PORT_WRITE(TXPKT1C, 0);
+ DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+ return ret;
+}
+
+static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct vc4_dsi *dsi = host_to_dsi(host);
+ int ret = 0;
+
+ dsi->lanes = device->lanes;
+ dsi->channel = device->channel;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(&dsi->pdev->dev,
+ "Only VIDEO mode panels supported currently.\n");
+ return 0;
+ }
+
+ dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (!dsi->panel)
+ return 0;
+
+ ret = drm_panel_attach(dsi->panel, dsi->connector);
+ if (ret != 0)
+ return ret;
+
+ drm_helper_hpd_irq_event(dsi->connector->dev);
+
+ return 0;
+}
+
+static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct vc4_dsi *dsi = host_to_dsi(host);
+
+ if (dsi->panel) {
+ int ret = drm_panel_detach(dsi->panel);
+
+ if (ret)
+ return ret;
+
+ dsi->panel = NULL;
+
+ drm_helper_hpd_irq_event(dsi->connector->dev);
+ }
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops vc4_dsi_host_ops = {
+ .attach = vc4_dsi_host_attach,
+ .detach = vc4_dsi_host_detach,
+ .transfer = vc4_dsi_host_transfer,
+};
+
+static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
+ .disable = vc4_dsi_encoder_disable,
+ .enable = vc4_dsi_encoder_enable,
+};
+
+static const struct of_device_id vc4_dsi_dt_match[] = {
+ { .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 },
+ {}
+};
+
+static void dsi_handle_error(struct vc4_dsi *dsi,
+ irqreturn_t *ret, u32 stat, u32 bit,
+ const char *type)
+{
+ if (!(stat & bit))
+ return;
+
+ DRM_ERROR("DSI%d: %s error\n", dsi->port, type);
+ *ret = IRQ_HANDLED;
+}
+
+static irqreturn_t vc4_dsi_irq_handler(int irq, void *data)
+{
+ struct vc4_dsi *dsi = data;
+ u32 stat = DSI_PORT_READ(INT_STAT);
+ irqreturn_t ret = IRQ_NONE;
+
+ DSI_PORT_WRITE(INT_STAT, stat);
+
+ dsi_handle_error(dsi, &ret, stat,
+ DSI1_INT_ERR_SYNC_ESC, "LPDT sync");
+ dsi_handle_error(dsi, &ret, stat,
+ DSI1_INT_ERR_CONTROL, "data lane 0 sequence");
+ dsi_handle_error(dsi, &ret, stat,
+ DSI1_INT_ERR_CONT_LP0, "LP0 contention");
+ dsi_handle_error(dsi, &ret, stat,
+ DSI1_INT_ERR_CONT_LP1, "LP1 contention");
+ dsi_handle_error(dsi, &ret, stat,
+ DSI1_INT_HSTX_TO, "HSTX timeout");
+ dsi_handle_error(dsi, &ret, stat,
+ DSI1_INT_LPRX_TO, "LPRX timeout");
+ dsi_handle_error(dsi, &ret, stat,
+ DSI1_INT_TA_TO, "turnaround timeout");
+ dsi_handle_error(dsi, &ret, stat,
+ DSI1_INT_PR_TO, "peripheral reset timeout");
+
+ if (stat & (DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF)) {
+ complete(&dsi->xfer_completion);
+ ret = IRQ_HANDLED;
+ } else if (stat & DSI1_INT_HSTX_TO) {
+ dsi->xfer_result = -ETIMEDOUT;
+ complete(&dsi->xfer_completion);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/* Exposes clocks generated by the analog PHY that are consumed by
+ * CPRMAN (clk-bcm2835.c).
+ */
+static int
+vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
+{
+ struct device *dev = &dsi->pdev->dev;
+ const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
+ static const struct {
+ const char *dsi0_name, *dsi1_name;
+ int div;
+ } phy_clocks[] = {
+ { "dsi0_byte", "dsi1_byte", 8 },
+ { "dsi0_ddr2", "dsi1_ddr2", 4 },
+ { "dsi0_ddr", "dsi1_ddr", 2 },
+ };
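+ /* For illustration (PLL rate assumed): at a 360 MHz PHY PLL these
+ * fixed factors expose byte = 45 MHz, DDR2 = 90 MHz, and
+ * DDR = 180 MHz clocks to CPRMAN.
+ */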
+ int i;
+
+ dsi->clk_onecell = devm_kzalloc(dev,
+ sizeof(*dsi->clk_onecell) +
+ ARRAY_SIZE(phy_clocks) *
+ sizeof(struct clk_hw *),
+ GFP_KERNEL);
+ if (!dsi->clk_onecell)
+ return -ENOMEM;
+ dsi->clk_onecell->num = ARRAY_SIZE(phy_clocks);
+
+ for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
+ struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
+ struct clk_init_data init;
+ int ret;
+
+ /* We just use core fixed factor clock ops for the PHY
+ * clocks. The clocks are actually gated by the
+ * PHY_AFEC0_DDRCLK_EN bits, which we should be
+ * setting if we use the DDR/DDR2 clocks. However,
+ * vc4_dsi_encoder_enable() sets up AFEC0 as well as both our
+ * parent DSI PLL's rate and this clock's rate, so it knows
+ * whether DDR/DDR2 will be used and could enable the gates
+ * itself.
+ */
+ fix->mult = 1;
+ fix->div = phy_clocks[i].div;
+ fix->hw.init = &init;
+
+ memset(&init, 0, sizeof(init));
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ if (dsi->port == 1)
+ init.name = phy_clocks[i].dsi1_name;
+ else
+ init.name = phy_clocks[i].dsi0_name;
+ init.ops = &clk_fixed_factor_ops;
+
+ ret = devm_clk_hw_register(dev, &fix->hw);
+ if (ret)
+ return ret;
+
+ dsi->clk_onecell->hws[i] = &fix->hw;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node,
+ of_clk_hw_onecell_get,
+ dsi->clk_onecell);
+}
+
+static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_dsi *dsi;
+ struct vc4_dsi_encoder *vc4_dsi_encoder;
+ const struct of_device_id *match;
+ dma_cap_mask_t dma_mask;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ match = of_match_device(vc4_dsi_dt_match, dev);
+ if (!match)
+ return -ENODEV;
+
+ dsi->port = (uintptr_t)match->data;
+
+ vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
+ GFP_KERNEL);
+ if (!vc4_dsi_encoder)
+ return -ENOMEM;
+ vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1;
+ vc4_dsi_encoder->dsi = dsi;
+ dsi->encoder = &vc4_dsi_encoder->base.base;
+
+ dsi->pdev = pdev;
+ dsi->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(dsi->regs))
+ return PTR_ERR(dsi->regs);
+
+ if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
+ dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
+ DSI_PORT_READ(ID), DSI_ID_VALUE);
+ return -ENODEV;
+ }
+
+ /* DSI1 has a broken AXI slave that doesn't respond to writes
+ * from the ARM. It does handle writes from the DMA engine,
+ * so set up a channel for talking to it.
+ */
+ if (dsi->port == 1) {
+ dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
+ &dsi->reg_dma_paddr,
+ GFP_KERNEL);
+ if (!dsi->reg_dma_mem) {
+ DRM_ERROR("Failed to get DMA memory\n");
+ return -ENOMEM;
+ }
+
+ dma_cap_zero(dma_mask);
+ dma_cap_set(DMA_MEMCPY, dma_mask);
+ dsi->reg_dma_chan = dma_request_chan_by_mask(&dma_mask);
+ if (IS_ERR(dsi->reg_dma_chan)) {
+ ret = PTR_ERR(dsi->reg_dma_chan);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get DMA channel: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Get the physical address of the device's registers. The
+ * struct resource for the regs gives us the bus address
+ * instead.
+ */
+ dsi->reg_paddr = be32_to_cpup(of_get_address(dev->of_node,
+ 0, NULL, NULL));
+ }
+
+ init_completion(&dsi->xfer_completion);
+ /* At startup enable error-reporting interrupts and nothing else. */
+ DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+ /* Clear any existing interrupt state. */
+ DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT));
+
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_dsi_irq_handler, 0, "vc4 dsi", dsi);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get interrupt: %d\n", ret);
+ return ret;
+ }
+
+ dsi->escape_clock = devm_clk_get(dev, "escape");
+ if (IS_ERR(dsi->escape_clock)) {
+ ret = PTR_ERR(dsi->escape_clock);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get escape clock: %d\n", ret);
+ return ret;
+ }
+
+ dsi->pll_phy_clock = devm_clk_get(dev, "phy");
+ if (IS_ERR(dsi->pll_phy_clock)) {
+ ret = PTR_ERR(dsi->pll_phy_clock);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get phy clock: %d\n", ret);
+ return ret;
+ }
+
+ dsi->pixel_clock = devm_clk_get(dev, "pixel");
+ if (IS_ERR(dsi->pixel_clock)) {
+ ret = PTR_ERR(dsi->pixel_clock);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+ return ret;
+ }
+
+ /* The esc clock rate is supposed to always be 100 MHz. */
+ ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
+ if (ret) {
+ dev_err(dev, "Failed to set esc clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = vc4_dsi_init_phy_clocks(dsi);
+ if (ret)
+ return ret;
+
+ if (dsi->port == 1)
+ vc4->dsi1 = dsi;
+
+ drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
+
+ dsi->connector = vc4_dsi_connector_init(drm, dsi);
+ if (IS_ERR(dsi->connector)) {
+ ret = PTR_ERR(dsi->connector);
+ goto err_destroy_encoder;
+ }
+
+ dsi->dsi_host.ops = &vc4_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+
+ mipi_dsi_host_register(&dsi->dsi_host);
+
+ dev_set_drvdata(dev, dsi);
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_destroy_encoder:
+ vc4_dsi_encoder_destroy(dsi->encoder);
+
+ return ret;
+}
+
+static void vc4_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_dsi *dsi = dev_get_drvdata(dev);
+
+ pm_runtime_disable(dev);
+
+ vc4_dsi_connector_destroy(dsi->connector);
+ vc4_dsi_encoder_destroy(dsi->encoder);
+
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
+ clk_disable_unprepare(dsi->pll_phy_clock);
+ clk_disable_unprepare(dsi->escape_clock);
+
+ if (dsi->port == 1)
+ vc4->dsi1 = NULL;
+}
+
+static const struct component_ops vc4_dsi_ops = {
+ .bind = vc4_dsi_bind,
+ .unbind = vc4_dsi_unbind,
+};
+
+static int vc4_dsi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_dsi_ops);
+}
+
+static int vc4_dsi_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_dsi_ops);
+ return 0;
+}
+
+struct platform_driver vc4_dsi_driver = {
+ .probe = vc4_dsi_dev_probe,
+ .remove = vc4_dsi_dev_remove,
+ .driver = {
+ .name = "vc4_dsi",
+ .of_match_table = vc4_dsi_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index c4cb2e26de32..93d5994f3a04 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -356,15 +356,11 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
return;
}
- if (vc4_encoder->rgb_range_selectable) {
- if (vc4_encoder->limited_rgb_range) {
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_LIMITED;
- } else {
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_FULL;
- }
- }
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+ vc4_encoder->limited_rgb_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL,
+ vc4_encoder->rgb_range_selectable);
vc4_hdmi_write_infoframe(encoder, &frame);
}
@@ -463,7 +459,9 @@ static void vc4_hdmi_encoder_mode_set(struct drm_encoder *encoder,
csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
VC4_HD_CSC_CTL_ORDER);
- if (vc4_encoder->hdmi_monitor && drm_match_cea_mode(mode) > 1) {
+ if (vc4_encoder->hdmi_monitor &&
+ drm_default_rgb_quant_range(mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED) {
/* CEA VICs other than #1 require limited range RGB
* output unless overridden by an AVI infoframe.
* Apply a colorspace conversion to squash 0-255 down
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 6fbab1c82cb1..f7f7677f6d8d 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -141,8 +141,7 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
int ret, i;
u32 __iomem *dst_kernel;
- ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
- 0);
+ ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
if (ret) {
DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
ret);
@@ -170,6 +169,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
struct vc4_dev *vc4 = drm->dev_private;
struct vc4_hvs *hvs = NULL;
int ret;
+ u32 dispctrl;
hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
@@ -211,6 +211,19 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
return ret;
vc4->hvs = hvs;
+
+ dispctrl = HVS_READ(SCALER_DISPCTRL);
+
+ dispctrl |= SCALER_DISPCTRL_ENABLE;
+
+ /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
+ * be unused.
+ */
+ dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
+
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index be8dd8262f27..ad7925a9e0ea 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -231,7 +231,6 @@ int vc4_kms_load(struct drm_device *dev)
drm_mode_config_reset(dev);
vc4->fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_crtc,
dev->mode_config.num_connector);
if (IS_ERR(vc4->fbdev))
vc4->fbdev = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 686cdd3c86f2..f7a229df572d 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -295,8 +295,8 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
u32 subpixel_src_mask = (1 << 16) - 1;
- u32 format = fb->pixel_format;
- int num_planes = drm_format_num_planes(format);
+ u32 format = fb->format->format;
+ int num_planes = fb->format->num_planes;
u32 h_subsample = 1;
u32 v_subsample = 1;
int i;
@@ -369,7 +369,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
*/
if (vc4_state->crtc_x < 0) {
for (i = 0; i < num_planes; i++) {
- u32 cpp = drm_format_plane_cpp(fb->pixel_format, i);
+ u32 cpp = fb->format->cpp[i];
u32 subs = ((i == 0) ? 1 : h_subsample);
vc4_state->offsets[i] += (cpp *
@@ -496,7 +496,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
u32 ctl0_offset = vc4_state->dlist_count;
- const struct hvs_format *format = vc4_get_hvs_format(fb->pixel_format);
+ const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
int num_planes = drm_format_num_planes(format->drm);
u32 scl0, scl1;
u32 lbm_size;
@@ -514,9 +514,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (lbm_size) {
if (!vc4_state->lbm.allocated) {
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
- ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
- &vc4_state->lbm,
- lbm_size, 32, 0);
+ ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+ &vc4_state->lbm,
+ lbm_size, 32, 0, 0);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
} else {
WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 39f6886b2410..385405a2df05 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -190,6 +190,8 @@
# define PV_VCONTROL_ODD_DELAY_SHIFT 6
# define PV_VCONTROL_ODD_FIRST BIT(5)
# define PV_VCONTROL_INTERLACE BIT(4)
+# define PV_VCONTROL_DSI BIT(3)
+# define PV_VCONTROL_COMMAND BIT(2)
# define PV_VCONTROL_CONTINUOUS BIT(1)
# define PV_VCONTROL_VIDEN BIT(0)
@@ -244,6 +246,9 @@
# define SCALER_DISPCTRL_ENABLE BIT(31)
# define SCALER_DISPCTRL_DSP2EISLUR BIT(15)
# define SCALER_DISPCTRL_DSP1EISLUR BIT(14)
+# define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18)
+# define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18
+
/* Enables Display 0 short line and underrun contribution to
* SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are
* always enabled.
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 1f8798ad329c..cb59c7ab98b9 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -31,6 +31,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <drm/drm_cache.h>
#include <uapi/drm/vgem_drm.h>
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index da25dfe7b80e..3109c8308eb5 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -190,12 +190,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Expose the fence via the dma-buf */
ret = 0;
- ww_mutex_lock(&resv->lock, NULL);
+ reservation_object_lock(resv, NULL);
if (arg->flags & VGEM_FENCE_WRITE)
reservation_object_add_excl_fence(resv, fence);
else if ((ret = reservation_object_reserve_shared(resv)) == 0)
reservation_object_add_shared_fence(resv, fence);
- ww_mutex_unlock(&resv->lock);
+ reservation_object_unlock(resv);
/* Record the fence in our idr for later signaling */
if (ret == 0) {
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 286a785fab4f..9873942ca8f4 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -134,7 +134,7 @@ extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file
extern int via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
-extern int via_driver_unload(struct drm_device *dev);
+extern void via_driver_unload(struct drm_device *dev);
extern int via_init_context(struct drm_device *dev, int context);
extern int via_final_context(struct drm_device *dev, int context);
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 0b3522dba6e8..2ad865870372 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -116,13 +116,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
return 0;
}
-int via_driver_unload(struct drm_device *dev)
+void via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
-
- return 0;
}
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index a04ef1c992d9..4217d66a5cc6 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
if (mem->type == VIA_MEM_AGP)
retval = drm_mm_insert_node(&dev_priv->agp_mm,
&item->mm_node,
- tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+ tmpSize);
else
retval = drm_mm_insert_node(&dev_priv->vram_mm,
&item->mm_node,
- tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+ tmpSize);
if (retval)
goto fail_alloc;
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 81d1807ac228..0c384d9a2b75 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,6 +1,6 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
- depends on DRM && VIRTIO
+ depends on DRM && VIRTIO && MMU
select DRM_KMS_HELPER
select DRM_TTM
help
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 58048709c34e..fad5a1cc5903 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -88,12 +88,13 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
bo = gem_to_virtio_gpu_obj(obj);
+ drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
+
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
vgfb->obj = NULL;
return ret;
}
- drm_helper_mode_fill_fb_struct(&vgfb->base, mode_cmd);
spin_lock_init(&vgfb->dirty_lock);
vgfb->x1 = vgfb->y1 = INT_MAX;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 3b97d50fd392..43e1d5916c6c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -83,10 +83,6 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
if (ret)
goto err_free;
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
- driver->major, driver->minor, driver->patchlevel,
- driver->date, dev->primary->index);
-
return 0;
err_free:
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 08906c8ce3fa..2f766735c16d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -35,6 +35,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
@@ -214,7 +215,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
/* virtio_kms.c */
int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
-int virtio_gpu_driver_unload(struct drm_device *dev);
+void virtio_gpu_driver_unload(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index cde9f3758106..163a67db8cf1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -43,7 +43,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
struct drm_device *dev = fb->base.dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
bool store_for_later = false;
- int bpp = fb->base.bits_per_pixel / 8;
+ int bpp = fb->base.format->cpp[0];
int x2, y2;
unsigned long flags;
struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
@@ -333,7 +333,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
info->screen_buffer = obj->vmap;
info->screen_size = obj->gem_base.size;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &vfbdev->helper,
sizes->fb_width, sizes->fb_height);
@@ -387,7 +387,6 @@ int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
&virtio_gpu_fb_helper_funcs);
ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
- vgdev->num_scanouts,
VIRTIO_GPUFB_CONN_LIMIT);
if (ret) {
kfree(vgfbdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 1235519853f4..30f989a0cafc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -166,10 +166,14 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
INIT_WORK(&vgdev->config_changed_work,
virtio_gpu_config_changed_work_func);
+#ifdef __LITTLE_ENDIAN
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
DRM_INFO("virgl 3d acceleration %s\n",
- vgdev->has_virgl_3d ? "enabled" : "not available");
+ vgdev->has_virgl_3d ? "enabled" : "not supported by host");
+#else
+ DRM_INFO("virgl 3d acceleration not supported by guest\n");
+#endif
ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
callbacks, names);
@@ -246,7 +250,7 @@ static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
}
}
-int virtio_gpu_driver_unload(struct drm_device *dev)
+void virtio_gpu_driver_unload(struct drm_device *dev)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -262,7 +266,6 @@ int virtio_gpu_driver_unload(struct drm_device *dev)
virtio_gpu_cleanup_cap_cache(vgdev);
kfree(vgdev->capsets);
kfree(vgdev);
- return 0;
}
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 4a1de9f81193..9cc7079f7aca 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -198,11 +198,11 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
}
static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
- ttm_bo_man_init,
- ttm_bo_man_takedown,
- ttm_bo_man_get_node,
- ttm_bo_man_put_node,
- ttm_bo_man_debug
+ .init = ttm_bo_man_init,
+ .takedown = ttm_bo_man_takedown,
+ .get_node = ttm_bo_man_get_node,
+ .put_node = ttm_bo_man_put_node,
+ .debug = ttm_bo_man_debug
};
static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
@@ -386,6 +386,7 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
}
static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
+ bool evict,
struct ttm_mem_reg *new_mem)
{
struct virtio_gpu_object *bo;
@@ -433,8 +434,6 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
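/*
 * Editor's note -- an illustrative sketch, not part of the patch: the
 * switch above to C99 designated initializers keeps the ops table correct
 * even if struct fields are reordered or new ones are added; unnamed
 * members are implicitly NULL. For example (name hypothetical):
 */
static const struct ttm_mem_type_manager_func example_manager_func = {
	.init  = ttm_bo_man_init,	/* order no longer matters */
	.debug = ttm_bo_man_debug,	/* the other hooks stay NULL */
};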
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index fb7b82aad763..8c308dac99c5 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && X86
+ depends on DRM && PCI && X86 && MMU
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 531d22025fec..babe7cb84fc2 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -980,6 +980,8 @@ svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
size.width = max_t(u32, base_level.width >> mip_level, 1);
size.height = max_t(u32, base_level.height >> mip_level, 1);
size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+ size.pad64 = 0;
+
return size;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c894a48a74a6..4c7f24a67a2e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -825,6 +825,7 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
* (currently only resources).
*/
static void vmw_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
struct ttm_mem_reg *mem)
{
vmw_resource_move_notify(bo, mem);
@@ -839,7 +840,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
- ttm_bo_wait(bo, false, false);
+ (void) ttm_bo_wait(bo, false, false);
}
@@ -858,6 +859,4 @@ struct ttm_bo_driver vmw_bo_driver = {
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
- .lru_tail = &ttm_bo_default_lru_tail,
- .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index aa04fb0159a7..77cb7c627e09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -673,16 +673,10 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
memset(info->node, 0, sizeof(*info->node));
spin_lock_bh(&man->lock);
- ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
- 0, 0,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
+ ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
if (ret) {
vmw_cmdbuf_man_process(man);
- ret = drm_mm_insert_node_generic(&man->mm, info->node,
- info->page_size, 0, 0,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
+ ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
}
spin_unlock_bh(&man->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 18061a4bc2f2..541a5887dd6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -951,7 +951,7 @@ out_err0:
return ret;
}
-static int vmw_driver_unload(struct drm_device *dev)
+static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
enum vmw_res_type i;
@@ -998,8 +998,6 @@ static int vmw_driver_unload(struct drm_device *dev)
idr_destroy(&dev_priv->res_idr[i]);
kfree(dev_priv);
-
- return 0;
}
static void vmw_postclose(struct drm_device *dev,
@@ -1295,7 +1293,7 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
*/
void vmw_svga_enable(struct vmw_private *dev_priv)
{
- ttm_read_lock(&dev_priv->reservation_sem, false);
+ (void) ttm_read_lock(&dev_priv->reservation_sem, false);
__vmw_svga_enable(dev_priv);
ttm_read_unlock(&dev_priv->reservation_sem);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 7a96798b9c0a..e9005b9a5e8c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -83,7 +83,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
}
- switch (par->set_fb->depth) {
+ switch (par->set_fb->format->depth) {
case 24:
case 32:
pal[regno] = ((red & 0xff00) << 8) |
@@ -91,8 +91,9 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
((blue & 0xff00) >> 8);
break;
default:
- DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
- par->set_fb->bits_per_pixel);
+ DRM_ERROR("Bad depth %u, bpp %u.\n",
+ par->set_fb->format->depth,
+ par->set_fb->format->cpp[0] * 8);
return 1;
}
@@ -197,7 +198,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
* Handle panning when copying from vmalloc to framebuffer.
* Clip dirty area to framebuffer.
*/
- cpp = (cur_fb->bits_per_pixel + 7) / 8;
+ cpp = cur_fb->format->cpp[0];
max_x = par->fb_x + cur_fb->width;
max_y = par->fb_y + cur_fb->height;
@@ -486,7 +487,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&
cur_fb->height == mode_cmd.height &&
- cur_fb->pixel_format == mode_cmd.pixel_format &&
+ cur_fb->format->format == mode_cmd.pixel_format &&
cur_fb->pitches[0] == mode_cmd.pitches[0])
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 170b61be1e4e..fec7348cea2c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
}
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
- vmw_gmrid_man_init,
- vmw_gmrid_man_takedown,
- vmw_gmrid_man_get_node,
- vmw_gmrid_man_put_node,
- vmw_gmrid_man_debug
+ .init = vmw_gmrid_man_init,
+ .takedown = vmw_gmrid_man_takedown,
+ .get_node = vmw_gmrid_man_get_node,
+ .put_node = vmw_gmrid_man_put_node,
+ .debug = vmw_gmrid_man_debug
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e7daf59bac80..d492d57d5309 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -583,7 +583,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- drm_helper_mode_fill_fb_struct(&vfbs->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handles[0];
vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
@@ -757,7 +757,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
struct vmw_surface **srf_out)
{
uint32_t format;
- struct drm_vmw_size content_base_size;
+ struct drm_vmw_size content_base_size = {0};
struct vmw_resource *res;
unsigned int bytes_pp;
struct drm_format_name_buf format_name;
@@ -864,7 +864,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- drm_helper_mode_fill_fb_struct(&vfbd->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.dmabuf = true;
vfbd->buffer = vmw_dmabuf_reference(dmabuf);
vfbd->base.user_handle = mode_cmd->handles[0];
@@ -1671,7 +1671,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
* 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
* 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
*/
- u64 bb_mem = bounding_box.w * bounding_box.h * 4;
+ u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
u64 pixel_mem = total_pixels * 4;
if (bb_mem > dev_priv->prim_bb_mem) {
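/*
 * Editor's note -- worked example for the (u64) cast above, not part of
 * the patch: bounding_box.w and .h are 32-bit, so the product used to be
 * evaluated in 32 bits. For a 32768 x 32768 layout, 32768 * 32768 * 4 =
 * 0x100000000 truncates to 0 in u32; promoting the first factor to u64
 * makes the whole multiplication 64-bit and preserves the 4 GiB result.
 */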
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index f42ce9a1c3ac..cb36e1d70133 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include "vmwgfx_drv.h"
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 23ec673d5e16..3806148e1bdb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -97,7 +97,8 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
fb = entry->base.crtc.primary->fb;
return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
- fb->bits_per_pixel, fb->depth);
+ fb->format->cpp[0] * 8,
+ fb->format->depth);
}
if (!list_empty(&lds->active)) {
@@ -105,7 +106,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
fb = entry->base.crtc.primary->fb;
vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
- fb->bits_per_pixel, fb->depth);
+ fb->format->cpp[0] * 8, fb->format->depth);
}
/* Make sure we always show something. */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index b6126a5f1269..941bcfd131ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -319,18 +319,17 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
int ret;
if (dev_priv->has_dx) {
- *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
+ *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
if (*otables == NULL)
return -ENOMEM;
- memcpy(*otables, dx_tables, sizeof(dx_tables));
dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
} else {
- *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
+ *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
+ GFP_KERNEL);
if (*otables == NULL)
return -ENOMEM;
- memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
}
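/*
 * Editor's note -- an illustrative equivalence, not part of the patch:
 * kmemdup(src, len, gfp) is the one-call form of the kmalloc()/memcpy()
 * pair it replaces above, roughly:
 */
void *p = kmalloc(len, GFP_KERNEL);
if (p)
	memcpy(p, src, len);
/* ... which is what kmemdup(src, len, GFP_KERNEL) hands back. */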
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8e86d6d4141b..65b3f0369636 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1760,7 +1760,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv;
int ret;
- ttm_read_lock(&dev_priv->reservation_sem, false);
+ (void) ttm_read_lock(&dev_priv->reservation_sem, false);
mutex_lock(&dev_priv->cmdbuf_mutex);
ret = vmw_resource_reserve(res, false, true);
@@ -1770,7 +1770,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
if (--res->pin_count == 0 && res->backup) {
struct vmw_dma_buffer *vbo = res->backup;
- ttm_bo_reserve(&vbo->base, false, false, NULL);
+ (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->base);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index f42359084adc..d4268efc37d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -598,7 +598,7 @@ static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf =
container_of(framebuffer, struct vmw_framebuffer_dmabuf,
base)->buffer;
- int depth = framebuffer->base.depth;
+ int depth = framebuffer->base.format->depth;
struct {
uint32_t header;
SVGAFifoCmdDefineGMRFB body;
@@ -618,7 +618,7 @@ static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
}
cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+ cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
cmd->body.format.colorDepth = depth;
cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 94ad8d2acf9a..b27cd18ee66a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -424,7 +424,7 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
*/
if (new_content_type == SEPARATE_DMA) {
- switch (new_fb->bits_per_pixel) {
+ switch (new_fb->format->cpp[0] * 8) {
case 32:
content_srf.format = SVGA3D_X8R8G8B8;
break;
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
index 4065b2840f1c..5b36421ef3e5 100644
--- a/drivers/gpu/drm/zte/Kconfig
+++ b/drivers/gpu/drm/zte/Kconfig
@@ -4,5 +4,7 @@ config DRM_ZTE
select DRM_KMS_CMA_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ select VIDEOMODE_HELPERS
help
Choose this option to enable DRM on ZTE ZX SoCs.
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
index 699180bfd57c..01352b56c418 100644
--- a/drivers/gpu/drm/zte/Makefile
+++ b/drivers/gpu/drm/zte/Makefile
@@ -2,6 +2,7 @@ zxdrm-y := \
zx_drm_drv.o \
zx_hdmi.o \
zx_plane.o \
+ zx_tvenc.o \
zx_vou.o
obj-$(CONFIG_DRM_ZTE) += zxdrm.o
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 3e76f72c92ff..5c6944a1e72c 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -141,7 +141,7 @@ static int zx_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ priv->fbdev = drm_fbdev_cma_init(drm, 32,
drm->mode_config.num_connector);
if (IS_ERR(priv->fbdev)) {
ret = PTR_ERR(priv->fbdev);
@@ -247,6 +247,7 @@ static struct platform_driver zx_drm_platform_driver = {
static struct platform_driver *drivers[] = {
&zx_crtc_driver,
&zx_hdmi_driver,
+ &zx_tvenc_driver,
&zx_drm_platform_driver,
};
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h
index e65cd18a6cba..5ca035b079c7 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.h
+++ b/drivers/gpu/drm/zte/zx_drm_drv.h
@@ -13,6 +13,7 @@
extern struct platform_driver zx_crtc_driver;
extern struct platform_driver zx_hdmi_driver;
+extern struct platform_driver zx_tvenc_driver;
static inline u32 zx_readl(void __iomem *reg)
{
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index 6bf6c364811e..c47b9cbfe270 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -25,6 +25,8 @@
#include <drm/drm_of.h>
#include <drm/drmP.h>
+#include <sound/hdmi-codec.h>
+
#include "zx_hdmi_regs.h"
#include "zx_vou.h"
@@ -49,17 +51,11 @@ struct zx_hdmi {
bool sink_is_hdmi;
bool sink_has_audio;
const struct vou_inf *inf;
+ struct platform_device *audio_pdev;
};
#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
-static const struct vou_inf vou_inf_hdmi = {
- .id = VOU_HDMI,
- .data_sel = VOU_YUV444,
- .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
- .clocks_sel_bits = BIT(13) | BIT(2),
-};
-
static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
{
return readl_relaxed(hdmi->mmio + offset * 4);
@@ -238,14 +234,14 @@ static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
zx_hdmi_hw_enable(hdmi);
- vou_inf_enable(hdmi->inf, encoder->crtc);
+ vou_inf_enable(VOU_HDMI, encoder->crtc);
}
static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
{
struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
- vou_inf_disable(hdmi->inf, encoder->crtc);
+ vou_inf_disable(VOU_HDMI, encoder->crtc);
zx_hdmi_hw_disable(hdmi);
@@ -366,6 +362,142 @@ static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+static int zx_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &hdmi->encoder;
+
+ vou_inf_hdmi_audio_sel(encoder->crtc, VOU_HDMI_AUD_SPDIF);
+
+ return 0;
+}
+
+static void zx_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+
+ /* Disable audio input */
+ hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, 0);
+}
+
+static inline int zx_hdmi_audio_get_n(unsigned int fs)
+{
+ unsigned int n;
+
+ if (fs && (fs % 44100) == 0)
+ n = 6272 * (fs / 44100);
+ else
+ n = fs * 128 / 1000;
+
+ return n;
+}
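/*
 * Editor's note -- worked values for the helper above, not part of the
 * patch. For the 44.1 kHz family the HDMI-recommended N is returned
 * directly (44100 -> 6272, 88200 -> 12544); other rates use the
 * 128 * fs / 1000 rule (32000 -> 4096, 48000 -> 6144, 96000 -> 12288).
 * The result is then split across N_SVAL1..3 below, e.g. n = 6144
 * (0x1800) programs 0x00/0x18/0x0.
 */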
+
+static int zx_hdmi_audio_hw_params(struct device *dev,
+ void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+ struct hdmi_audio_infoframe *cea = &params->cea;
+ union hdmi_infoframe frame;
+ int n;
+
+ /* We only support spdif for now */
+ if (daifmt->fmt != HDMI_SPDIF) {
+ DRM_DEV_ERROR(hdmi->dev, "invalid daifmt %d\n", daifmt->fmt);
+ return -EINVAL;
+ }
+
+ switch (params->sample_width) {
+ case 16:
+ hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
+ SPDIF_SAMPLE_SIZE_16BIT);
+ break;
+ case 20:
+ hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
+ SPDIF_SAMPLE_SIZE_20BIT);
+ break;
+ case 24:
+ hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
+ SPDIF_SAMPLE_SIZE_24BIT);
+ break;
+ default:
+ DRM_DEV_ERROR(hdmi->dev, "invalid sample width %d\n",
+ params->sample_width);
+ return -EINVAL;
+ }
+
+ /* CTS is calculated by hardware, and we only need to take care of N */
+ n = zx_hdmi_audio_get_n(params->sample_rate);
+ hdmi_writeb(hdmi, N_SVAL1, n & 0xff);
+ hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff);
+ hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf);
+
+ /* Enable spdif mode */
+ hdmi_writeb_mask(hdmi, AUD_MODE, SPDIF_EN, SPDIF_EN);
+
+ /* Enable audio input */
+ hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, AUD_IN_EN);
+
+ memcpy(&frame.audio, cea, sizeof(*cea));
+
+ return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO);
+}
+
+static int zx_hdmi_audio_digital_mute(struct device *dev, void *data,
+ bool enable)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+
+ if (enable)
+ hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE,
+ TPI_AUD_MUTE);
+ else
+ hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, 0);
+
+ return 0;
+}
+
+static int zx_hdmi_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_connector *connector = &hdmi->connector;
+
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops zx_hdmi_codec_ops = {
+ .audio_startup = zx_hdmi_audio_startup,
+ .hw_params = zx_hdmi_audio_hw_params,
+ .audio_shutdown = zx_hdmi_audio_shutdown,
+ .digital_mute = zx_hdmi_audio_digital_mute,
+ .get_eld = zx_hdmi_audio_get_eld,
+};
+
+static struct hdmi_codec_pdata zx_hdmi_codec_pdata = {
+ .ops = &zx_hdmi_codec_ops,
+ .spdif = 1,
+};
+
+static int zx_hdmi_audio_register(struct zx_hdmi *hdmi)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(hdmi->dev, HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &zx_hdmi_codec_pdata,
+ sizeof(zx_hdmi_codec_pdata));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ hdmi->audio_pdev = pdev;
+
+ return 0;
+}
+
static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
{
int len = msg->len;
@@ -523,7 +655,6 @@ static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi->dev = dev;
hdmi->drm = drm;
- hdmi->inf = &vou_inf_hdmi;
dev_set_drvdata(dev, hdmi);
@@ -566,6 +697,12 @@ static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ ret = zx_hdmi_audio_register(hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register audio: %d\n", ret);
+ return ret;
+ }
+
ret = zx_hdmi_register(drm, hdmi);
if (ret) {
DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
@@ -590,6 +727,9 @@ static void zx_hdmi_unbind(struct device *dev, struct device *master,
hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.funcs->destroy(&hdmi->encoder);
+
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
}
static const struct component_ops zx_hdmi_component_ops = {
diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h
index de911f66b658..c6d5d8211725 100644
--- a/drivers/gpu/drm/zte/zx_hdmi_regs.h
+++ b/drivers/gpu/drm/zte/zx_hdmi_regs.h
@@ -52,5 +52,19 @@
#define TPI_INFO_TRANS_RPT BIT(6)
#define TPI_DDC_MASTER_EN 0x06f8
#define HW_DDC_MASTER BIT(7)
+#define N_SVAL1 0xa03
+#define N_SVAL2 0xa04
+#define N_SVAL3 0xa05
+#define AUD_EN 0xa13
+#define AUD_IN_EN BIT(0)
+#define AUD_MODE 0xa14
+#define SPDIF_EN BIT(1)
+#define TPI_AUD_CONFIG 0xa62
+#define SPDIF_SAMPLE_SIZE_SHIFT 6
+#define SPDIF_SAMPLE_SIZE_MASK (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
+#define SPDIF_SAMPLE_SIZE_16BIT (0x1 << SPDIF_SAMPLE_SIZE_SHIFT)
+#define SPDIF_SAMPLE_SIZE_20BIT (0x2 << SPDIF_SAMPLE_SIZE_SHIFT)
+#define SPDIF_SAMPLE_SIZE_24BIT (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
+#define TPI_AUD_MUTE BIT(4)
#endif /* __ZX_HDMI_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 546eb92a94e8..1d08ba381098 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -21,16 +21,6 @@
#include "zx_plane_regs.h"
#include "zx_vou.h"
-struct zx_plane {
- struct drm_plane plane;
- void __iomem *layer;
- void __iomem *csc;
- void __iomem *hbsc;
- void __iomem *rsz;
-};
-
-#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
-
static const uint32_t gl_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
@@ -40,6 +30,261 @@ static const uint32_t gl_formats[] = {
DRM_FORMAT_ARGB4444,
};
+static const uint32_t vl_formats[] = {
+ DRM_FORMAT_NV12, /* Semi-planar YUV420 */
+ DRM_FORMAT_YUV420, /* Planar YUV420 */
+ DRM_FORMAT_YUYV, /* Packed YUV422 */
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUV444, /* YUV444 8bit */
+ /*
+ * TODO: add formats below that HW supports:
+ * - YUV420 P010
+ * - YUV420 Hantro
+ * - YUV444 10bit
+ */
+};
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
+static int zx_vl_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip;
+ int min_scale = FRAC_16_16(1, 8);
+ int max_scale = FRAC_16_16(8, 1);
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ /* nothing to check when disabling or disabled */
+ if (!crtc_state->enable)
+ return 0;
+
+ /* plane must be enabled */
+ if (!plane_state->crtc)
+ return -EINVAL;
+
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+ return drm_plane_helper_check_state(plane_state, &clip,
+ min_scale, max_scale,
+ true, true);
+}
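/*
 * Editor's note -- worked example, not part of the patch: FRAC_16_16()
 * produces 16.16 fixed-point ratios, so the limits passed to
 * drm_plane_helper_check_state() above are
 *	FRAC_16_16(1, 8) = (1 << 16) / 8 = 0x2000   (ratio 0.125)
 *	FRAC_16_16(8, 1) = (8 << 16) / 1 = 0x80000  (ratio 8.0)
 * i.e. the video layer may be scaled between 1/8x and 8x in each axis.
 */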
+
+static int zx_vl_get_fmt(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_NV12:
+ return VL_FMT_YUV420;
+ case DRM_FORMAT_YUV420:
+ return VL_YUV420_PLANAR | VL_FMT_YUV420;
+ case DRM_FORMAT_YUYV:
+ return VL_YUV422_YUYV | VL_FMT_YUV422;
+ case DRM_FORMAT_YVYU:
+ return VL_YUV422_YVYU | VL_FMT_YUV422;
+ case DRM_FORMAT_UYVY:
+ return VL_YUV422_UYVY | VL_FMT_YUV422;
+ case DRM_FORMAT_VYUY:
+ return VL_YUV422_VYUY | VL_FMT_YUV422;
+ case DRM_FORMAT_YUV444:
+ return VL_FMT_YUV444_8BIT;
+ default:
+ WARN_ONCE(1, "invalid pixel format %d\n", format);
+ return -EINVAL;
+ }
+}
+
+static inline void zx_vl_set_update(struct zx_plane *zplane)
+{
+ void __iomem *layer = zplane->layer;
+
+ zx_writel_mask(layer + VL_CTRL0, VL_UPDATE, VL_UPDATE);
+}
+
+static inline void zx_vl_rsz_set_update(struct zx_plane *zplane)
+{
+ zx_writel(zplane->rsz + RSZ_VL_ENABLE_CFG, 1);
+}
+
+static int zx_vl_rsz_get_fmt(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_YUV420:
+ return RSZ_VL_FMT_YCBCR420;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return RSZ_VL_FMT_YCBCR422;
+ case DRM_FORMAT_YUV444:
+ return RSZ_VL_FMT_YCBCR444;
+ default:
+ WARN_ONCE(1, "invalid pixel format %d\n", format);
+ return -EINVAL;
+ }
+}
+
+static inline u32 rsz_step_value(u32 src, u32 dst)
+{
+ u32 val = 0;
+
+ if (src == dst)
+ val = 0;
+ else if (src < dst)
+ val = RSZ_PARA_STEP((src << 16) / dst);
+ else if (src > dst)
+ val = RSZ_DATA_STEP(src / dst) |
+ RSZ_PARA_STEP(((src << 16) / dst) & 0xffff);
+
+ return val;
+}
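/*
 * Editor's note -- worked examples for rsz_step_value(), not part of the
 * patch. Upscaling 720 -> 1440: src < dst, so the value is
 * RSZ_PARA_STEP((720 << 16) / 1440) = RSZ_PARA_STEP(0x8000), a 0.5 step
 * per output pixel. Downscaling 1440 -> 720: (1440 << 16) / 720 =
 * 0x20000, giving RSZ_DATA_STEP(2) | RSZ_PARA_STEP(0x0000), i.e. a whole
 * step of 2 with no fractional part.
 */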
+
+static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format,
+ u32 src_w, u32 src_h, u32 dst_w, u32 dst_h)
+{
+ void __iomem *rsz = zplane->rsz;
+ u32 src_chroma_w = src_w;
+ u32 src_chroma_h = src_h;
+ int fmt; /* zx_vl_rsz_get_fmt() may return -EINVAL */
+
+ /* Set up source and destination resolution */
+ zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
+ zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
+
+ /* Configure data format for VL RSZ */
+ fmt = zx_vl_rsz_get_fmt(format);
+ if (fmt >= 0)
+ zx_writel_mask(rsz + RSZ_VL_CTRL_CFG, RSZ_VL_FMT_MASK, fmt);
+
+ /* Calculate Chroma height and width */
+ if (fmt == RSZ_VL_FMT_YCBCR420) {
+ src_chroma_w = src_w >> 1;
+ src_chroma_h = src_h >> 1;
+ } else if (fmt == RSZ_VL_FMT_YCBCR422) {
+ src_chroma_w = src_w >> 1;
+ }
+
+ /* Set up Luma and Chroma step registers */
+ zx_writel(rsz + RSZ_VL_LUMA_HOR, rsz_step_value(src_w, dst_w));
+ zx_writel(rsz + RSZ_VL_LUMA_VER, rsz_step_value(src_h, dst_h));
+ zx_writel(rsz + RSZ_VL_CHROMA_HOR, rsz_step_value(src_chroma_w, dst_w));
+ zx_writel(rsz + RSZ_VL_CHROMA_VER, rsz_step_value(src_chroma_h, dst_h));
+
+ zx_vl_rsz_set_update(zplane);
+}
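/*
 * Editor's note -- worked example, not part of the patch: for an NV12
 * (YCbCr 4:2:0) 720x576 source scaled to 1280x720, the luma steps are
 * computed from 720x576 while the chroma plane is halved in both axes to
 * 360x288, so the chroma step registers are programmed from 360 -> 1280
 * and 288 -> 720.
 */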
+
+static void zx_vl_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *src = &state->src;
+ struct drm_rect *dst = &state->dst;
+ struct drm_gem_cma_object *cma_obj;
+ void __iomem *layer = zplane->layer;
+ void __iomem *hbsc = zplane->hbsc;
+ void __iomem *paddr_reg;
+ dma_addr_t paddr;
+ u32 src_x, src_y, src_w, src_h;
+ u32 dst_x, dst_y, dst_w, dst_h;
+ uint32_t format;
+ int fmt; /* zx_vl_get_fmt() may return -EINVAL */
+ int num_planes;
+ int i;
+
+ if (!fb)
+ return;
+
+ format = fb->format->format;
+
+ src_x = src->x1 >> 16;
+ src_y = src->y1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
+
+ dst_x = dst->x1;
+ dst_y = dst->y1;
+ dst_w = drm_rect_width(dst);
+ dst_h = drm_rect_height(dst);
+
+ /* Set up data address registers for Y, Cb and Cr planes */
+ num_planes = drm_format_num_planes(format);
+ paddr_reg = layer + VL_Y;
+ for (i = 0; i < num_planes; i++) {
+ cma_obj = drm_fb_cma_get_gem_obj(fb, i);
+ paddr = cma_obj->paddr + fb->offsets[i];
+ paddr += src_y * fb->pitches[i];
+ paddr += src_x * drm_format_plane_cpp(format, i);
+ zx_writel(paddr_reg, paddr);
+ paddr_reg += 4;
+ }
+
+ /* Set up source height/width register */
+ zx_writel(layer + VL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
+
+ /* Set up start position register */
+ zx_writel(layer + VL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
+
+ /* Set up end position register */
+ zx_writel(layer + VL_POS_END,
+ GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
+
+ /* Strides of Cb and Cr planes should be identical */
+ zx_writel(layer + VL_STRIDE, LUMA_STRIDE(fb->pitches[0]) |
+ CHROMA_STRIDE(fb->pitches[1]));
+
+ /* Set up video layer data format */
+ fmt = zx_vl_get_fmt(format);
+ if (fmt >= 0)
+ zx_writel(layer + VL_CTRL1, fmt);
+
+ /* The scaler is always present, so always use it (this setting means not bypassed) */
+ zx_writel_mask(layer + VL_CTRL2, VL_SCALER_BYPASS_MODE,
+ VL_SCALER_BYPASS_MODE);
+
+ zx_vl_rsz_setup(zplane, format, src_w, src_h, dst_w, dst_h);
+
+ /* Enable HBSC block */
+ zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
+
+ zx_vou_layer_enable(plane);
+
+ zx_vl_set_update(zplane);
+}
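/*
 * Editor's note -- worked example of the per-plane address computation
 * above, not part of the patch. For planar YUV444 (three one-byte-per-
 * pixel planes) with pitches[i] = 1280 and a source crop at
 * (src_x, src_y) = (16, 8), each plane's start address becomes
 *	cma_obj->paddr + fb->offsets[i] + 8 * 1280 + 16 * 1
 * written to VL_Y, VL_Y + 4 and VL_Y + 8 respectively.
 */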
+
+static void zx_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+ void __iomem *hbsc = zplane->hbsc;
+
+ zx_vou_layer_disable(plane);
+
+ /* Disable HBSC block */
+ zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
+}
+
+static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = {
+ .atomic_check = zx_vl_plane_atomic_check,
+ .atomic_update = zx_vl_plane_atomic_update,
+ .atomic_disable = zx_plane_atomic_disable,
+};
+
static int zx_gl_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
@@ -107,14 +352,6 @@ static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
}
-void zx_plane_set_update(struct drm_plane *plane)
-{
- struct zx_plane *zplane = to_zx_plane(plane);
-
- zx_gl_rsz_set_update(zplane);
- zx_gl_set_update(zplane);
-}
-
static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
u32 dst_w, u32 dst_h)
{
@@ -146,7 +383,7 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane,
if (!fb)
return;
- format = fb->pixel_format;
+ format = fb->format->format;
stride = fb->pitches[0];
src_x = plane->state->src_x >> 16;
@@ -159,7 +396,7 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane,
dst_w = plane->state->crtc_w;
dst_h = plane->state->crtc_h;
- bpp = drm_format_plane_cpp(format, 0);
+ bpp = fb->format->cpp[0];
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
paddr = cma_obj->paddr + fb->offsets[0];
@@ -207,12 +444,15 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane,
/* Enable HBSC block */
zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
+ zx_vou_layer_enable(plane);
+
zx_gl_set_update(zplane);
}
static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
.atomic_check = zx_gl_plane_atomic_check,
.atomic_update = zx_gl_plane_atomic_update,
+ .atomic_disable = zx_plane_atomic_disable,
};
static void zx_plane_destroy(struct drm_plane *plane)
@@ -230,6 +470,28 @@ static const struct drm_plane_funcs zx_plane_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
+void zx_plane_set_update(struct drm_plane *plane)
+{
+ struct zx_plane *zplane = to_zx_plane(plane);
+
+ /* Do nothing if the plane is not enabled */
+ if (!plane->state->crtc)
+ return;
+
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ zx_gl_rsz_set_update(zplane);
+ zx_gl_set_update(zplane);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ zx_vl_rsz_set_update(zplane);
+ zx_vl_set_update(zplane);
+ break;
+ default:
+ WARN_ONCE(1, "unsupported plane type %d\n", plane->type);
+ }
+}
+
static void zx_plane_hbsc_init(struct zx_plane *zplane)
{
void __iomem *hbsc = zplane->hbsc;
@@ -248,28 +510,16 @@ static void zx_plane_hbsc_init(struct zx_plane *zplane)
zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
}
-struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
- struct zx_layer_data *data,
- enum drm_plane_type type)
+int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
+ enum drm_plane_type type)
{
const struct drm_plane_helper_funcs *helper;
- struct zx_plane *zplane;
- struct drm_plane *plane;
+ struct drm_plane *plane = &zplane->plane;
+ struct device *dev = zplane->dev;
const uint32_t *formats;
unsigned int format_count;
int ret;
- zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
- if (!zplane)
- return ERR_PTR(-ENOMEM);
-
- plane = &zplane->plane;
-
- zplane->layer = data->layer;
- zplane->hbsc = data->hbsc;
- zplane->csc = data->csc;
- zplane->rsz = data->rsz;
-
zx_plane_hbsc_init(zplane);
switch (type) {
@@ -279,10 +529,12 @@ struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
format_count = ARRAY_SIZE(gl_formats);
break;
case DRM_PLANE_TYPE_OVERLAY:
- /* TODO: add video layer (vl) support */
+ helper = &zx_vl_plane_helper_funcs;
+ formats = vl_formats;
+ format_count = ARRAY_SIZE(vl_formats);
break;
default:
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
@@ -290,10 +542,10 @@ struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
type, NULL);
if (ret) {
DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
- return ERR_PTR(ret);
+ return ret;
}
drm_plane_helper_add(plane, helper);
- return plane;
+ return 0;
}
diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h
index 2b82cd558d9d..933611ddffd0 100644
--- a/drivers/gpu/drm/zte/zx_plane.h
+++ b/drivers/gpu/drm/zte/zx_plane.h
@@ -11,16 +11,20 @@
#ifndef __ZX_PLANE_H__
#define __ZX_PLANE_H__
-struct zx_layer_data {
+struct zx_plane {
+ struct drm_plane plane;
+ struct device *dev;
void __iomem *layer;
void __iomem *csc;
void __iomem *hbsc;
void __iomem *rsz;
+ const struct vou_layer_bits *bits;
};
-struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
- struct zx_layer_data *data,
- enum drm_plane_type type);
+#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
+
+int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
+ enum drm_plane_type type);
void zx_plane_set_update(struct drm_plane *plane);
#endif /* __ZX_PLANE_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h
index 3dde6716a558..65f271aeabed 100644
--- a/drivers/gpu/drm/zte/zx_plane_regs.h
+++ b/drivers/gpu/drm/zte/zx_plane_regs.h
@@ -46,6 +46,37 @@
#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
+/* VL registers */
+#define VL_CTRL0 0x00
+#define VL_UPDATE BIT(3)
+#define VL_CTRL1 0x04
+#define VL_YUV420_PLANAR BIT(5)
+#define VL_YUV422_SHIFT 3
+#define VL_YUV422_YUYV (0 << VL_YUV422_SHIFT)
+#define VL_YUV422_YVYU (1 << VL_YUV422_SHIFT)
+#define VL_YUV422_UYVY (2 << VL_YUV422_SHIFT)
+#define VL_YUV422_VYUY (3 << VL_YUV422_SHIFT)
+#define VL_FMT_YUV420 0
+#define VL_FMT_YUV422 1
+#define VL_FMT_YUV420_P010 2
+#define VL_FMT_YUV420_HANTRO 3
+#define VL_FMT_YUV444_8BIT 4
+#define VL_FMT_YUV444_10BIT 5
+#define VL_CTRL2 0x08
+#define VL_SCALER_BYPASS_MODE BIT(0)
+#define VL_STRIDE 0x0c
+#define LUMA_STRIDE_SHIFT 16
+#define LUMA_STRIDE_MASK (0xffff << LUMA_STRIDE_SHIFT)
+#define CHROMA_STRIDE_SHIFT 0
+#define CHROMA_STRIDE_MASK (0xffff << CHROMA_STRIDE_SHIFT)
+#define VL_SRC_SIZE 0x10
+#define VL_Y 0x14
+#define VL_POS_START 0x30
+#define VL_POS_END 0x34
+
+#define LUMA_STRIDE(x) (((x) << LUMA_STRIDE_SHIFT) & LUMA_STRIDE_MASK)
+#define CHROMA_STRIDE(x) (((x) << CHROMA_STRIDE_SHIFT) & CHROMA_STRIDE_MASK)
+
/* CSC registers */
#define CSC_CTRL0 0x30
#define CSC_COV_MODE_SHIFT 16
@@ -69,6 +100,18 @@
#define RSZ_DEST_CFG 0x04
#define RSZ_ENABLE_CFG 0x14
+#define RSZ_VL_LUMA_HOR 0x08
+#define RSZ_VL_LUMA_VER 0x0c
+#define RSZ_VL_CHROMA_HOR 0x10
+#define RSZ_VL_CHROMA_VER 0x14
+#define RSZ_VL_CTRL_CFG 0x18
+#define RSZ_VL_FMT_SHIFT 3
+#define RSZ_VL_FMT_MASK (0x3 << RSZ_VL_FMT_SHIFT)
+#define RSZ_VL_FMT_YCBCR420 (0x0 << RSZ_VL_FMT_SHIFT)
+#define RSZ_VL_FMT_YCBCR422 (0x1 << RSZ_VL_FMT_SHIFT)
+#define RSZ_VL_FMT_YCBCR444 (0x2 << RSZ_VL_FMT_SHIFT)
+#define RSZ_VL_ENABLE_CFG 0x1c
+
#define RSZ_VER_SHIFT 16
#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT)
#define RSZ_HOR_SHIFT 0
@@ -77,6 +120,14 @@
#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
+#define RSZ_DATA_STEP_SHIFT 16
+#define RSZ_DATA_STEP_MASK (0xffff << RSZ_DATA_STEP_SHIFT)
+#define RSZ_PARA_STEP_SHIFT 0
+#define RSZ_PARA_STEP_MASK (0xffff << RSZ_PARA_STEP_SHIFT)
+
+#define RSZ_DATA_STEP(x) (((x) << RSZ_DATA_STEP_SHIFT) & RSZ_DATA_STEP_MASK)
+#define RSZ_PARA_STEP(x) (((x) << RSZ_PARA_STEP_SHIFT) & RSZ_PARA_STEP_MASK)
+
/* HBSC registers */
#define HBSC_SATURATION 0x00
#define HBSC_HUE 0x04
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
new file mode 100644
index 000000000000..b56dc69843fc
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2017 Linaro Ltd.
+ * Copyright 2017 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_tvenc_regs.h"
+#include "zx_vou.h"
+
+struct zx_tvenc_pwrctrl {
+ struct regmap *regmap;
+ u32 reg;
+ u32 mask;
+};
+
+struct zx_tvenc {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct device *dev;
+ void __iomem *mmio;
+ const struct vou_inf *inf;
+ struct zx_tvenc_pwrctrl pwrctrl;
+};
+
+#define to_zx_tvenc(x) container_of(x, struct zx_tvenc, x)
+
+struct zx_tvenc_mode {
+ struct drm_display_mode mode;
+ u32 video_info;
+ u32 video_res;
+ u32 field1_param;
+ u32 field2_param;
+ u32 burst_line_odd1;
+ u32 burst_line_even1;
+ u32 burst_line_odd2;
+ u32 burst_line_even2;
+ u32 line_timing_param;
+ u32 weight_value;
+ u32 blank_black_level;
+ u32 burst_level;
+ u32 control_param;
+ u32 sub_carrier_phase1;
+ u32 phase_line_incr_cvbs;
+};
+
+/*
+ * The CRM cannot provide a suitable frequency directly, so we have to
+ * request a multiplied rate from the CRM and use the divider in the VOU
+ * to derive the desired one.
+ */
+#define TVENC_CLOCK_MULTIPLIER 4
+
+static const struct zx_tvenc_mode tvenc_mode_pal = {
+ .mode = {
+ .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
+ .hdisplay = 720,
+ .hsync_start = 720 + 12,
+ .hsync_end = 720 + 12 + 2,
+ .htotal = 720 + 12 + 2 + 130,
+ .vdisplay = 576,
+ .vsync_start = 576 + 2,
+ .vsync_end = 576 + 2 + 2,
+ .vtotal = 576 + 2 + 2 + 20,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE,
+ },
+ .video_info = 0x00040040,
+ .video_res = 0x05a9c760,
+ .field1_param = 0x0004d416,
+ .field2_param = 0x0009b94f,
+ .burst_line_odd1 = 0x0004d406,
+ .burst_line_even1 = 0x0009b53e,
+ .burst_line_odd2 = 0x0004d805,
+ .burst_line_even2 = 0x0009b93f,
+ .line_timing_param = 0x06a96fdf,
+ .weight_value = 0x00c188a0,
+ .blank_black_level = 0x0000fcfc,
+ .burst_level = 0x00001595,
+ .control_param = 0x00000001,
+ .sub_carrier_phase1 = 0x1504c566,
+ .phase_line_incr_cvbs = 0xc068db8c,
+};
+
+static const struct zx_tvenc_mode tvenc_mode_ntsc = {
+ .mode = {
+ .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
+ .hdisplay = 720,
+ .hsync_start = 720 + 16,
+ .hsync_end = 720 + 16 + 2,
+ .htotal = 720 + 16 + 2 + 120,
+ .vdisplay = 480,
+ .vsync_start = 480 + 3,
+ .vsync_end = 480 + 3 + 2,
+ .vtotal = 480 + 3 + 2 + 17,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE,
+ },
+ .video_info = 0x00040080,
+ .video_res = 0x05a8375a,
+ .field1_param = 0x00041817,
+ .field2_param = 0x0008351e,
+ .burst_line_odd1 = 0x00041006,
+ .burst_line_even1 = 0x0008290d,
+ .burst_line_odd2 = 0x00000000,
+ .burst_line_even2 = 0x00000000,
+ .line_timing_param = 0x06a8ef9e,
+ .weight_value = 0x00b68197,
+ .blank_black_level = 0x0000f0f0,
+ .burst_level = 0x0000009c,
+ .control_param = 0x00000001,
+ .sub_carrier_phase1 = 0x10f83e10,
+ .phase_line_incr_cvbs = 0x80000000,
+};
+
+static const struct zx_tvenc_mode *tvenc_modes[] = {
+ &tvenc_mode_pal,
+ &tvenc_mode_ntsc,
+};
+
+static const struct zx_tvenc_mode *
+zx_tvenc_find_zmode(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
+ const struct zx_tvenc_mode *zmode = tvenc_modes[i];
+
+ if (drm_mode_equal(mode, &zmode->mode))
+ return zmode;
+ }
+
+ return NULL;
+}
+
+static void zx_tvenc_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
+ const struct zx_tvenc_mode *zmode;
+ struct vou_div_config configs[] = {
+ { VOU_DIV_INF, VOU_DIV_4 },
+ { VOU_DIV_TVENC, VOU_DIV_1 },
+ { VOU_DIV_LAYER, VOU_DIV_2 },
+ };
+
+ zx_vou_config_dividers(encoder->crtc, configs, ARRAY_SIZE(configs));
+
+ zmode = zx_tvenc_find_zmode(mode);
+ if (!zmode) {
+ DRM_DEV_ERROR(tvenc->dev, "failed to find zmode\n");
+ return;
+ }
+
+ zx_writel(tvenc->mmio + VENC_VIDEO_INFO, zmode->video_info);
+ zx_writel(tvenc->mmio + VENC_VIDEO_RES, zmode->video_res);
+ zx_writel(tvenc->mmio + VENC_FIELD1_PARAM, zmode->field1_param);
+ zx_writel(tvenc->mmio + VENC_FIELD2_PARAM, zmode->field2_param);
+ zx_writel(tvenc->mmio + VENC_LINE_O_1, zmode->burst_line_odd1);
+ zx_writel(tvenc->mmio + VENC_LINE_E_1, zmode->burst_line_even1);
+ zx_writel(tvenc->mmio + VENC_LINE_O_2, zmode->burst_line_odd2);
+ zx_writel(tvenc->mmio + VENC_LINE_E_2, zmode->burst_line_even2);
+ zx_writel(tvenc->mmio + VENC_LINE_TIMING_PARAM,
+ zmode->line_timing_param);
+ zx_writel(tvenc->mmio + VENC_WEIGHT_VALUE, zmode->weight_value);
+ zx_writel(tvenc->mmio + VENC_BLANK_BLACK_LEVEL,
+ zmode->blank_black_level);
+ zx_writel(tvenc->mmio + VENC_BURST_LEVEL, zmode->burst_level);
+ zx_writel(tvenc->mmio + VENC_CONTROL_PARAM, zmode->control_param);
+ zx_writel(tvenc->mmio + VENC_SUB_CARRIER_PHASE1,
+ zmode->sub_carrier_phase1);
+ zx_writel(tvenc->mmio + VENC_PHASE_LINE_INCR_CVBS,
+ zmode->phase_line_incr_cvbs);
+}
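/*
 * Editor's note -- worked example of the clock setup above, not part of
 * the patch. The PAL/NTSC pixel clock is 13.5 MHz, so the mode asks the
 * CRM for 13500 * TVENC_CLOCK_MULTIPLIER = 54000 kHz, and the
 * { VOU_DIV_INF, VOU_DIV_4 } entry (assuming VOU_DIV_4 means a
 * divide-by-four setting) brings the interface clock back down to the
 * desired 13.5 MHz inside the VOU.
 */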
+
+static void zx_tvenc_encoder_enable(struct drm_encoder *encoder)
+{
+ struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
+ struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
+
+ /* Set bit to power up TVENC DAC */
+ regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask,
+ pwrctrl->mask);
+
+ vou_inf_enable(VOU_TV_ENC, encoder->crtc);
+
+ zx_writel(tvenc->mmio + VENC_ENABLE, 1);
+}
+
+static void zx_tvenc_encoder_disable(struct drm_encoder *encoder)
+{
+ struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
+ struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
+
+ zx_writel(tvenc->mmio + VENC_ENABLE, 0);
+
+ vou_inf_disable(VOU_TV_ENC, encoder->crtc);
+
+ /* Clear bit to power down TVENC DAC */
+ regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0);
+}
+
+static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
+ .enable = zx_tvenc_encoder_enable,
+ .disable = zx_tvenc_encoder_disable,
+ .mode_set = zx_tvenc_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
+{
+ struct zx_tvenc *tvenc = to_zx_tvenc(connector);
+ struct device *dev = tvenc->dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
+ const struct zx_tvenc_mode *zmode = tvenc_modes[i];
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &zmode->mode);
+ if (!mode) {
+ DRM_DEV_ERROR(dev, "failed to duplicate drm mode\n");
+ continue;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+
+static enum drm_mode_status
+zx_tvenc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct zx_tvenc *tvenc = to_zx_tvenc(connector);
+ const struct zx_tvenc_mode *zmode;
+
+ zmode = zx_tvenc_find_zmode(mode);
+ if (!zmode) {
+ DRM_DEV_ERROR(tvenc->dev, "unsupported mode: %s\n", mode->name);
+ return MODE_NOMODE;
+ }
+
+ return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs zx_tvenc_connector_helper_funcs = {
+ .get_modes = zx_tvenc_connector_get_modes,
+ .mode_valid = zx_tvenc_connector_mode_valid,
+};
+
+static const struct drm_connector_funcs zx_tvenc_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
+{
+ struct drm_encoder *encoder = &tvenc->encoder;
+ struct drm_connector *connector = &tvenc->connector;
+
+ /*
+ * The tvenc is designed to use the aux channel, since a deflicker
+ * block is available on that channel.
+ */
+ encoder->possible_crtcs = BIT(1);
+
+ drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs,
+ DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
+
+ connector->interlace_allowed = true;
+
+ drm_connector_init(drm, connector, &zx_tvenc_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite);
+ drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static int zx_tvenc_pwrctrl_init(struct zx_tvenc *tvenc)
+{
+ struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
+ struct device *dev = tvenc->dev;
+ struct of_phandle_args out_args;
+ struct regmap *regmap;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(dev->of_node,
+ "zte,tvenc-power-control", 2, 0, &out_args);
+ if (ret)
+ return ret;
+
+ regmap = syscon_node_to_regmap(out_args.np);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto out;
+ }
+
+ pwrctrl->regmap = regmap;
+ pwrctrl->reg = out_args.args[0];
+ pwrctrl->mask = out_args.args[1];
+
+out:
+ of_node_put(out_args.np);
+ return ret;
+}
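/*
 * Editor's note -- hypothetical illustration, not part of the patch.
 * of_parse_phandle_with_fixed_args(..., 2, 0, ...) above expects a
 * phandle followed by exactly two cells, so a matching device-tree
 * property would take a form along these lines (node and values
 * invented):
 *
 *	zte,tvenc-power-control = <&sysctrl 0x170 0x10>;
 *
 * where args[0] becomes pwrctrl->reg and args[1] becomes pwrctrl->mask.
 */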
+
+static int zx_tvenc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct resource *res;
+ struct zx_tvenc *tvenc;
+ int ret;
+
+ tvenc = devm_kzalloc(dev, sizeof(*tvenc), GFP_KERNEL);
+ if (!tvenc)
+ return -ENOMEM;
+
+ tvenc->dev = dev;
+ dev_set_drvdata(dev, tvenc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tvenc->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(tvenc->mmio)) {
+ ret = PTR_ERR(tvenc->mmio);
+ DRM_DEV_ERROR(dev, "failed to remap tvenc region: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_tvenc_pwrctrl_init(tvenc);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret);
+ return ret;
+ }
+
+ ret = zx_tvenc_register(drm, tvenc);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register tvenc: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void zx_tvenc_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ /* Nothing to do */
+}
+
+static const struct component_ops zx_tvenc_component_ops = {
+ .bind = zx_tvenc_bind,
+ .unbind = zx_tvenc_unbind,
+};
+
+static int zx_tvenc_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &zx_tvenc_component_ops);
+}
+
+static int zx_tvenc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &zx_tvenc_component_ops);
+ return 0;
+}
+
+static const struct of_device_id zx_tvenc_of_match[] = {
+ { .compatible = "zte,zx296718-tvenc", },
+ { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_tvenc_of_match);
+
+struct platform_driver zx_tvenc_driver = {
+ .probe = zx_tvenc_probe,
+ .remove = zx_tvenc_remove,
+ .driver = {
+ .name = "zx-tvenc",
+ .of_match_table = zx_tvenc_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/zte/zx_tvenc_regs.h b/drivers/gpu/drm/zte/zx_tvenc_regs.h
new file mode 100644
index 000000000000..bd91f5dcc1f3
--- /dev/null
+++ b/drivers/gpu/drm/zte/zx_tvenc_regs.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2017 Linaro Ltd.
+ * Copyright 2017 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_TVENC_REGS_H__
+#define __ZX_TVENC_REGS_H__
+
+#define VENC_VIDEO_INFO 0x04
+#define VENC_VIDEO_RES 0x08
+#define VENC_FIELD1_PARAM 0x10
+#define VENC_FIELD2_PARAM 0x14
+#define VENC_LINE_O_1 0x18
+#define VENC_LINE_E_1 0x1c
+#define VENC_LINE_O_2 0x20
+#define VENC_LINE_E_2 0x24
+#define VENC_LINE_TIMING_PARAM 0x28
+#define VENC_WEIGHT_VALUE 0x2c
+#define VENC_BLANK_BLACK_LEVEL 0x30
+#define VENC_BURST_LEVEL 0x34
+#define VENC_CONTROL_PARAM 0x3c
+#define VENC_SUB_CARRIER_PHASE1 0x40
+#define VENC_PHASE_LINE_INCR_CVBS 0x48
+#define VENC_ENABLE 0xa8
+
+#endif /* __ZX_TVENC_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
index 73fe15c17c32..cf92d675feaa 100644
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -40,6 +40,7 @@ struct zx_crtc_regs {
u32 fir_active;
u32 fir_htiming;
u32 fir_vtiming;
+ u32 sec_vtiming;
u32 timing_shift;
u32 timing_pi_shift;
};
@@ -48,6 +49,7 @@ static const struct zx_crtc_regs main_crtc_regs = {
.fir_active = FIR_MAIN_ACTIVE,
.fir_htiming = FIR_MAIN_H_TIMING,
.fir_vtiming = FIR_MAIN_V_TIMING,
+ .sec_vtiming = SEC_MAIN_V_TIMING,
.timing_shift = TIMING_MAIN_SHIFT,
.timing_pi_shift = TIMING_MAIN_PI_SHIFT,
};
@@ -56,6 +58,7 @@ static const struct zx_crtc_regs aux_crtc_regs = {
.fir_active = FIR_AUX_ACTIVE,
.fir_htiming = FIR_AUX_H_TIMING,
.fir_vtiming = FIR_AUX_V_TIMING,
+ .sec_vtiming = SEC_AUX_V_TIMING,
.timing_shift = TIMING_AUX_SHIFT,
.timing_pi_shift = TIMING_AUX_PI_SHIFT,
};
@@ -65,7 +68,17 @@ struct zx_crtc_bits {
u32 polarity_shift;
u32 int_frame_mask;
u32 tc_enable;
- u32 gl_enable;
+ u32 sec_vactive_shift;
+ u32 sec_vactive_mask;
+ u32 interlace_select;
+ u32 pi_enable;
+ u32 div_vga_shift;
+ u32 div_pic_shift;
+ u32 div_tvenc_shift;
+ u32 div_hdmi_pnx_shift;
+ u32 div_hdmi_shift;
+ u32 div_inf_shift;
+ u32 div_layer_shift;
};
static const struct zx_crtc_bits main_crtc_bits = {
@@ -73,7 +86,17 @@ static const struct zx_crtc_bits main_crtc_bits = {
.polarity_shift = MAIN_POL_SHIFT,
.int_frame_mask = TIMING_INT_MAIN_FRAME,
.tc_enable = MAIN_TC_EN,
- .gl_enable = OSD_CTRL0_GL0_EN,
+ .sec_vactive_shift = SEC_VACT_MAIN_SHIFT,
+ .sec_vactive_mask = SEC_VACT_MAIN_MASK,
+ .interlace_select = MAIN_INTERLACE_SEL,
+ .pi_enable = MAIN_PI_EN,
+ .div_vga_shift = VGA_MAIN_DIV_SHIFT,
+ .div_pic_shift = PIC_MAIN_DIV_SHIFT,
+ .div_tvenc_shift = TVENC_MAIN_DIV_SHIFT,
+ .div_hdmi_pnx_shift = HDMI_MAIN_PNX_DIV_SHIFT,
+ .div_hdmi_shift = HDMI_MAIN_DIV_SHIFT,
+ .div_inf_shift = INF_MAIN_DIV_SHIFT,
+ .div_layer_shift = LAYER_MAIN_DIV_SHIFT,
};
static const struct zx_crtc_bits aux_crtc_bits = {
@@ -81,7 +104,17 @@ static const struct zx_crtc_bits aux_crtc_bits = {
.polarity_shift = AUX_POL_SHIFT,
.int_frame_mask = TIMING_INT_AUX_FRAME,
.tc_enable = AUX_TC_EN,
- .gl_enable = OSD_CTRL0_GL1_EN,
+ .sec_vactive_shift = SEC_VACT_AUX_SHIFT,
+ .sec_vactive_mask = SEC_VACT_AUX_MASK,
+ .interlace_select = AUX_INTERLACE_SEL,
+ .pi_enable = AUX_PI_EN,
+ .div_vga_shift = VGA_AUX_DIV_SHIFT,
+ .div_pic_shift = PIC_AUX_DIV_SHIFT,
+ .div_tvenc_shift = TVENC_AUX_DIV_SHIFT,
+ .div_hdmi_pnx_shift = HDMI_AUX_PNX_DIV_SHIFT,
+ .div_hdmi_shift = HDMI_AUX_DIV_SHIFT,
+ .div_inf_shift = INF_AUX_DIV_SHIFT,
+ .div_layer_shift = LAYER_AUX_DIV_SHIFT,
};
struct zx_crtc {
@@ -97,6 +130,40 @@ struct zx_crtc {
#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
+struct vou_layer_bits {
+ u32 enable;
+ u32 chnsel;
+ u32 clksel;
+};
+
+static const struct vou_layer_bits zx_gl_bits[GL_NUM] = {
+ {
+ .enable = OSD_CTRL0_GL0_EN,
+ .chnsel = OSD_CTRL0_GL0_SEL,
+ .clksel = VOU_CLK_GL0_SEL,
+ }, {
+ .enable = OSD_CTRL0_GL1_EN,
+ .chnsel = OSD_CTRL0_GL1_SEL,
+ .clksel = VOU_CLK_GL1_SEL,
+ },
+};
+
+static const struct vou_layer_bits zx_vl_bits[VL_NUM] = {
+ {
+ .enable = OSD_CTRL0_VL0_EN,
+ .chnsel = OSD_CTRL0_VL0_SEL,
+ .clksel = VOU_CLK_VL0_SEL,
+ }, {
+ .enable = OSD_CTRL0_VL1_EN,
+ .chnsel = OSD_CTRL0_VL1_SEL,
+ .clksel = VOU_CLK_VL1_SEL,
+ }, {
+ .enable = OSD_CTRL0_VL2_EN,
+ .chnsel = OSD_CTRL0_VL2_SEL,
+ .clksel = VOU_CLK_VL2_SEL,
+ },
+};
+
struct zx_vou_hw {
struct device *dev;
void __iomem *osd;
@@ -112,6 +179,33 @@ struct zx_vou_hw {
struct zx_crtc *aux_crtc;
};
+enum vou_inf_data_sel {
+ VOU_YUV444 = 0,
+ VOU_RGB_101010 = 1,
+ VOU_RGB_888 = 2,
+ VOU_RGB_666 = 3,
+};
+
+struct vou_inf {
+ enum vou_inf_id id;
+ enum vou_inf_data_sel data_sel;
+ u32 clocks_en_bits;
+ u32 clocks_sel_bits;
+};
+
+static struct vou_inf vou_infs[] = {
+ [VOU_HDMI] = {
+ .data_sel = VOU_YUV444,
+ .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
+ .clocks_sel_bits = BIT(13) | BIT(2),
+ },
+ [VOU_TV_ENC] = {
+ .data_sel = VOU_YUV444,
+ .clocks_en_bits = BIT(15),
+ .clocks_sel_bits = BIT(11) | BIT(0),
+ },
+};
+
static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
{
struct zx_crtc *zcrtc = to_zx_crtc(crtc);
@@ -119,20 +213,30 @@ static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
return zcrtc->vou;
}
-void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc)
+void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
+ enum vou_inf_hdmi_audio aud)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+
+ zx_writel_mask(vou->vouctl + VOU_INF_HDMI_CTRL, VOU_HDMI_AUD_MASK, aud);
+}
+
+void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc)
{
struct zx_crtc *zcrtc = to_zx_crtc(crtc);
struct zx_vou_hw *vou = zcrtc->vou;
+ struct vou_inf *inf = &vou_infs[id];
bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
- u32 data_sel_shift = inf->id << 1;
+ u32 data_sel_shift = id << 1;
/* Select data format */
zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
inf->data_sel << data_sel_shift);
/* Select channel */
- zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << inf->id,
- zcrtc->chn_type << inf->id);
+ zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << id,
+ zcrtc->chn_type << id);
/* Select interface clocks */
zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
@@ -143,20 +247,79 @@ void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc)
inf->clocks_en_bits);
/* Enable the device */
- zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 1 << inf->id);
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 1 << id);
}
-void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc)
+void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc)
{
struct zx_vou_hw *vou = crtc_to_vou(crtc);
+ struct vou_inf *inf = &vou_infs[id];
/* Disable the device */
- zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 0);
+ zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 0);
/* Disable interface clocks */
zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
}
+void zx_vou_config_dividers(struct drm_crtc *crtc,
+ struct vou_div_config *configs, int num)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ const struct zx_crtc_bits *bits = zcrtc->bits;
+ int i;
+
+ /* Clear update flag bit */
+ zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, 0);
+
+ for (i = 0; i < num; i++) {
+ struct vou_div_config *cfg = configs + i;
+ u32 reg, shift;
+
+ switch (cfg->id) {
+ case VOU_DIV_VGA:
+ reg = VOU_CLK_SEL;
+ shift = bits->div_vga_shift;
+ break;
+ case VOU_DIV_PIC:
+ reg = VOU_CLK_SEL;
+ shift = bits->div_pic_shift;
+ break;
+ case VOU_DIV_TVENC:
+ reg = VOU_DIV_PARA;
+ shift = bits->div_tvenc_shift;
+ break;
+ case VOU_DIV_HDMI_PNX:
+ reg = VOU_DIV_PARA;
+ shift = bits->div_hdmi_pnx_shift;
+ break;
+ case VOU_DIV_HDMI:
+ reg = VOU_DIV_PARA;
+ shift = bits->div_hdmi_shift;
+ break;
+ case VOU_DIV_INF:
+ reg = VOU_DIV_PARA;
+ shift = bits->div_inf_shift;
+ break;
+ case VOU_DIV_LAYER:
+ reg = VOU_DIV_PARA;
+ shift = bits->div_layer_shift;
+ break;
+ default:
+ continue;
+ }
+
+ /* Each divider occupies 3 bits */
+ zx_writel_mask(vou->vouctl + reg, 0x7 << shift,
+ cfg->val << shift);
+ }
+
+ /* Set the update flag bit so the dividers take effect */
+ zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE,
+ DIV_PARA_UPDATE);
+}
+
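A usage sketch for the new divider API (illustrative ids and values only; the enums and the struct are added to zx_vou.h further down in this patch):

static void example_program_hdmi_path(struct drm_crtc *crtc)
{
	struct vou_div_config cfgs[] = {
		{ .id = VOU_DIV_HDMI,  .val = VOU_DIV_1 },
		{ .id = VOU_DIV_INF,   .val = VOU_DIV_1 },
		{ .id = VOU_DIV_LAYER, .val = VOU_DIV_1 },
	};

	/* One batched call; DIV_PARA_UPDATE is toggled once at the end */
	zx_vou_config_dividers(crtc, cfgs, ARRAY_SIZE(cfgs));
}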
static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
{
zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
@@ -165,11 +328,13 @@ static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
static void zx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
struct zx_crtc *zcrtc = to_zx_crtc(crtc);
struct zx_vou_hw *vou = zcrtc->vou;
const struct zx_crtc_regs *regs = zcrtc->regs;
const struct zx_crtc_bits *bits = zcrtc->bits;
struct videomode vm;
+ u32 scan_mask;
u32 pol = 0;
u32 val;
int ret;
@@ -177,7 +342,7 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
drm_display_mode_to_videomode(mode, &vm);
/* Set up timing parameters */
- val = V_ACTIVE(vm.vactive - 1);
+ val = V_ACTIVE((interlaced ? vm.vactive / 2 : vm.vactive) - 1);
val |= H_ACTIVE(vm.hactive - 1);
zx_writel(vou->timing + regs->fir_active, val);
@@ -191,6 +356,25 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
val |= FRONT_PORCH(vm.vfront_porch - 1);
zx_writel(vou->timing + regs->fir_vtiming, val);
+ if (interlaced) {
+ u32 shift = bits->sec_vactive_shift;
+ u32 mask = bits->sec_vactive_mask;
+
+ val = zx_readl(vou->timing + SEC_V_ACTIVE);
+ val &= ~mask;
+ val |= ((vm.vactive / 2 - 1) << shift) & mask;
+ zx_writel(vou->timing + SEC_V_ACTIVE, val);
+
+ val = SYNC_WIDE(vm.vsync_len - 1);
+ /*
+ * The vback_porch for the second field needs to be one greater
+ * than the value used for the first field.
+ */
+ val |= BACK_PORCH(vm.vback_porch);
+ val |= FRONT_PORCH(vm.vfront_porch - 1);
+ zx_writel(vou->timing + regs->sec_vtiming, val);
+ }
+
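To make the field arithmetic above concrete, a hedged worked example:

/*
 * Worked example (illustrative): a 1920x1080i mode has vactive = 1080,
 * so each field scans 540 lines; FIR_ACTIVE is then programmed with
 * V_ACTIVE(540 - 1) above, and SEC_V_ACTIVE receives 540 - 1 = 539 in
 * the field selected by sec_vactive_shift/sec_vactive_mask.
 */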
/* Set up polarities */
if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
pol |= 1 << POL_VSYNC_SHIFT;
@@ -201,9 +385,17 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
pol << bits->polarity_shift);
/* Set up the SHIFT register, following what the ZTE BSP does */
- zx_writel(vou->timing + regs->timing_shift, H_SHIFT_VAL);
+ val = H_SHIFT_VAL;
+ if (interlaced)
+ val |= V_SHIFT_VAL << 16;
+ zx_writel(vou->timing + regs->timing_shift, val);
zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
+ /* Progressive or interlace scan select */
+ scan_mask = bits->interlace_select | bits->pi_enable;
+ zx_writel_mask(vou->timing + SCAN_CTRL, scan_mask,
+ interlaced ? scan_mask : 0);
+
/* Enable TIMING_CTRL */
zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
bits->tc_enable);
@@ -214,16 +406,16 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
vm.vactive << CHN_SCREEN_H_SHIFT);
+ /* Configure channel interlace buffer control */
+ zx_writel_mask(zcrtc->chnreg + CHN_INTERLACE_BUF_CTRL, CHN_INTERLACE_EN,
+ interlaced ? CHN_INTERLACE_EN : 0);
+
/* Update channel */
vou_chn_set_update(zcrtc);
/* Enable channel */
zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
- /* Enable Graphic Layer */
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable,
- bits->gl_enable);
-
drm_crtc_vblank_on(crtc);
ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
@@ -247,9 +439,6 @@ static void zx_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
- /* Disable Graphic Layer */
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable, 0);
-
/* Disable channel */
zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
@@ -294,7 +483,7 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
enum vou_chn_type chn_type)
{
struct device *dev = vou->dev;
- struct zx_layer_data data;
+ struct zx_plane *zplane;
struct zx_crtc *zcrtc;
int ret;
@@ -305,19 +494,27 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
zcrtc->vou = vou;
zcrtc->chn_type = chn_type;
+ zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
+ if (!zplane)
+ return -ENOMEM;
+
+ zplane->dev = dev;
+
if (chn_type == VOU_CHN_MAIN) {
- data.layer = vou->osd + MAIN_GL_OFFSET;
- data.csc = vou->osd + MAIN_CSC_OFFSET;
- data.hbsc = vou->osd + MAIN_HBSC_OFFSET;
- data.rsz = vou->otfppu + MAIN_RSZ_OFFSET;
+ zplane->layer = vou->osd + MAIN_GL_OFFSET;
+ zplane->csc = vou->osd + MAIN_CSC_OFFSET;
+ zplane->hbsc = vou->osd + MAIN_HBSC_OFFSET;
+ zplane->rsz = vou->otfppu + MAIN_RSZ_OFFSET;
+ zplane->bits = &zx_gl_bits[0];
zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
zcrtc->regs = &main_crtc_regs;
zcrtc->bits = &main_crtc_bits;
} else {
- data.layer = vou->osd + AUX_GL_OFFSET;
- data.csc = vou->osd + AUX_CSC_OFFSET;
- data.hbsc = vou->osd + AUX_HBSC_OFFSET;
- data.rsz = vou->otfppu + AUX_RSZ_OFFSET;
+ zplane->layer = vou->osd + AUX_GL_OFFSET;
+ zplane->csc = vou->osd + AUX_CSC_OFFSET;
+ zplane->hbsc = vou->osd + AUX_HBSC_OFFSET;
+ zplane->rsz = vou->otfppu + AUX_RSZ_OFFSET;
+ zplane->bits = &zx_gl_bits[1];
zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
zcrtc->regs = &aux_crtc_regs;
zcrtc->bits = &aux_crtc_bits;
@@ -331,13 +528,14 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
return ret;
}
- zcrtc->primary = zx_plane_init(drm, dev, &data, DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(zcrtc->primary)) {
- ret = PTR_ERR(zcrtc->primary);
+ ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_PRIMARY);
+ if (ret) {
DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
return ret;
}
+ zcrtc->primary = &zplane->plane;
+
ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
&zx_crtc_funcs, NULL);
if (ret) {
@@ -355,17 +553,6 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
return 0;
}
-static inline struct drm_crtc *zx_find_crtc(struct drm_device *drm, int pipe)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- if (crtc->index == pipe)
- return crtc;
-
- return NULL;
-}
-
int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe)
{
struct drm_crtc *crtc;
@@ -373,7 +560,7 @@ int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe)
struct zx_vou_hw *vou;
u32 int_frame_mask;
- crtc = zx_find_crtc(drm, pipe);
+ crtc = drm_crtc_from_index(drm, pipe);
if (!crtc)
return 0;
@@ -393,7 +580,7 @@ void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe)
struct zx_crtc *zcrtc;
struct zx_vou_hw *vou;
- crtc = zx_find_crtc(drm, pipe);
+ crtc = drm_crtc_from_index(drm, pipe);
if (!crtc)
return;
@@ -404,6 +591,78 @@ void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe)
zcrtc->bits->int_frame_mask, 0);
}
+void zx_vou_layer_enable(struct drm_plane *plane)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(plane->state->crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ struct zx_plane *zplane = to_zx_plane(plane);
+ const struct vou_layer_bits *bits = zplane->bits;
+
+ if (zcrtc->chn_type == VOU_CHN_MAIN) {
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel, 0);
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, 0);
+ } else {
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel,
+ bits->chnsel);
+ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel,
+ bits->clksel);
+ }
+
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable);
+}
+
+void zx_vou_layer_disable(struct drm_plane *plane)
+{
+ struct zx_crtc *zcrtc = to_zx_crtc(plane->crtc);
+ struct zx_vou_hw *vou = zcrtc->vou;
+ struct zx_plane *zplane = to_zx_plane(plane);
+ const struct vou_layer_bits *bits = zplane->bits;
+
+ zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, 0);
+}
+
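These two hooks are expected to be driven from the plane code; a hypothetical atomic_update tail could look like this (sketch, names invented):

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	/* ... program layer, CSC and resizer registers first ... */

	/* Binds the layer to its channel/clock and sets the enable bit */
	zx_vou_layer_enable(plane);
}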
+static void zx_overlay_init(struct drm_device *drm, struct zx_vou_hw *vou)
+{
+ struct device *dev = vou->dev;
+ struct zx_plane *zplane;
+ int i;
+ int ret;
+
+ /*
+ * VL0 has some quirks on scaling support which need special handling.
+ * Let's leave it out for now.
+ */
+ for (i = 1; i < VL_NUM; i++) {
+ zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
+ if (!zplane) {
+ DRM_DEV_ERROR(dev, "failed to allocate zplane %d\n", i);
+ return;
+ }
+
+ zplane->layer = vou->osd + OSD_VL_OFFSET(i);
+ zplane->hbsc = vou->osd + HBSC_VL_OFFSET(i);
+ zplane->rsz = vou->otfppu + RSZ_VL_OFFSET(i);
+ zplane->bits = &zx_vl_bits[i];
+
+ ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_OVERLAY);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to init overlay %d\n", i);
+ continue;
+ }
+ }
+}
+
+static inline void zx_osd_int_update(struct zx_crtc *zcrtc)
+{
+ struct drm_crtc *crtc = &zcrtc->crtc;
+ struct drm_plane *plane;
+
+ vou_chn_set_update(zcrtc);
+
+ drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask)
+ zx_plane_set_update(plane);
+}
+
static irqreturn_t vou_irq_handler(int irq, void *dev_id)
{
struct zx_vou_hw *vou = dev_id;
@@ -423,15 +682,11 @@ static irqreturn_t vou_irq_handler(int irq, void *dev_id)
state = zx_readl(vou->osd + OSD_INT_STA);
zx_writel(vou->osd + OSD_INT_CLRSTA, state);
- if (state & OSD_INT_MAIN_UPT) {
- vou_chn_set_update(vou->main_crtc);
- zx_plane_set_update(vou->main_crtc->primary);
- }
+ if (state & OSD_INT_MAIN_UPT)
+ zx_osd_int_update(vou->main_crtc);
- if (state & OSD_INT_AUX_UPT) {
- vou_chn_set_update(vou->aux_crtc);
- zx_plane_set_update(vou->aux_crtc->primary);
- }
+ if (state & OSD_INT_AUX_UPT)
+ zx_osd_int_update(vou->aux_crtc);
if (state & OSD_INT_ERROR)
DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
@@ -462,19 +717,9 @@ static void vou_dtrc_init(struct zx_vou_hw *vou)
static void vou_hw_init(struct zx_vou_hw *vou)
{
- /* Set GL0 to main channel and GL1 to aux channel */
- zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL0_SEL, 0);
- zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL1_SEL,
- OSD_CTRL0_GL1_SEL);
-
/* Release reset for all VOU modules */
zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
- /* Select main clock for GL0 and aux clock for GL1 module */
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL0_SEL, 0);
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL1_SEL,
- VOU_CLK_GL1_SEL);
-
/* Enable clock auto-gating for all VOU modules */
zx_writel(vou->vouctl + VOU_CLK_REQEN, ~0);
@@ -611,6 +856,8 @@ static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
goto disable_ppu_clk;
}
+ zx_overlay_init(drm, vou);
+
return 0;
disable_ppu_clk:
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
index 349e06cd86f4..57e3c31ee6a5 100644
--- a/drivers/gpu/drm/zte/zx_vou.h
+++ b/drivers/gpu/drm/zte/zx_vou.h
@@ -23,24 +23,48 @@ enum vou_inf_id {
VOU_VGA = 5,
};
-enum vou_inf_data_sel {
- VOU_YUV444 = 0,
- VOU_RGB_101010 = 1,
- VOU_RGB_888 = 2,
- VOU_RGB_666 = 3,
+enum vou_inf_hdmi_audio {
+ VOU_HDMI_AUD_SPDIF = BIT(0),
+ VOU_HDMI_AUD_I2S = BIT(1),
+ VOU_HDMI_AUD_DSD = BIT(2),
+ VOU_HDMI_AUD_HBR = BIT(3),
+ VOU_HDMI_AUD_PARALLEL = BIT(4),
};
-struct vou_inf {
- enum vou_inf_id id;
- enum vou_inf_data_sel data_sel;
- u32 clocks_en_bits;
- u32 clocks_sel_bits;
+void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
+ enum vou_inf_hdmi_audio aud);
+void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc);
+void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc);
+
+enum vou_div_id {
+ VOU_DIV_VGA,
+ VOU_DIV_PIC,
+ VOU_DIV_TVENC,
+ VOU_DIV_HDMI_PNX,
+ VOU_DIV_HDMI,
+ VOU_DIV_INF,
+ VOU_DIV_LAYER,
+};
+
+enum vou_div_val {
+ VOU_DIV_1 = 0,
+ VOU_DIV_2 = 1,
+ VOU_DIV_4 = 3,
+ VOU_DIV_8 = 7,
};
-void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc);
-void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc);
+struct vou_div_config {
+ enum vou_div_id id;
+ enum vou_div_val val;
+};
+
+void zx_vou_config_dividers(struct drm_crtc *crtc,
+ struct vou_div_config *configs, int num);
int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe);
void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe);
+void zx_vou_layer_enable(struct drm_plane *plane);
+void zx_vou_layer_disable(struct drm_plane *plane);
+
#endif /* __ZX_VOU_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h
index f44e7a4ae441..c066ef123434 100644
--- a/drivers/gpu/drm/zte/zx_vou_regs.h
+++ b/drivers/gpu/drm/zte/zx_vou_regs.h
@@ -22,6 +22,15 @@
#define AUX_HBSC_OFFSET 0x860
#define AUX_RSZ_OFFSET 0x800
+#define OSD_VL0_OFFSET 0x040
+#define OSD_VL_OFFSET(i) (OSD_VL0_OFFSET + 0x050 * (i))
+
+#define HBSC_VL0_OFFSET 0x760
+#define HBSC_VL_OFFSET(i) (HBSC_VL0_OFFSET + 0x040 * (i))
+
+#define RSZ_VL1_U0 0xa00
+#define RSZ_VL_OFFSET(i) (RSZ_VL1_U0 + 0x200 * (i))
+
/* OSD (GPC_GLOBAL) registers */
#define OSD_INT_STA 0x04
#define OSD_INT_CLRSTA 0x08
@@ -42,6 +51,12 @@
)
#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
#define OSD_CTRL0 0x10
+#define OSD_CTRL0_VL0_EN BIT(13)
+#define OSD_CTRL0_VL0_SEL BIT(12)
+#define OSD_CTRL0_VL1_EN BIT(11)
+#define OSD_CTRL0_VL1_SEL BIT(10)
+#define OSD_CTRL0_VL2_EN BIT(9)
+#define OSD_CTRL0_VL2_SEL BIT(8)
#define OSD_CTRL0_GL0_EN BIT(7)
#define OSD_CTRL0_GL0_SEL BIT(6)
#define OSD_CTRL0_GL1_EN BIT(5)
@@ -60,6 +75,8 @@
#define CHN_SCREEN_H_SHIFT 5
#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT)
#define CHN_UPDATE 0x08
+#define CHN_INTERLACE_BUF_CTRL 0x24
+#define CHN_INTERLACE_EN BIT(2)
/* TIMING_CTRL registers */
#define TIMING_TC_ENABLE 0x04
@@ -102,6 +119,19 @@
#define TIMING_MAIN_SHIFT 0x2c
#define TIMING_AUX_SHIFT 0x30
#define H_SHIFT_VAL 0x0048
+#define V_SHIFT_VAL 0x0001
+#define SCAN_CTRL 0x34
+#define AUX_PI_EN BIT(19)
+#define MAIN_PI_EN BIT(18)
+#define AUX_INTERLACE_SEL BIT(1)
+#define MAIN_INTERLACE_SEL BIT(0)
+#define SEC_V_ACTIVE 0x38
+#define SEC_VACT_MAIN_SHIFT 0
+#define SEC_VACT_MAIN_MASK (0xffff << SEC_VACT_MAIN_SHIFT)
+#define SEC_VACT_AUX_SHIFT 16
+#define SEC_VACT_AUX_MASK (0xffff << SEC_VACT_AUX_SHIFT)
+#define SEC_MAIN_V_TIMING 0x3c
+#define SEC_AUX_V_TIMING 0x40
#define TIMING_MAIN_PI_SHIFT 0x68
#define TIMING_AUX_PI_SHIFT 0x6c
#define H_PI_SHIFT_VAL 0x000f
@@ -146,10 +176,31 @@
#define VOU_INF_DATA_SEL 0x08
#define VOU_SOFT_RST 0x14
#define VOU_CLK_SEL 0x18
+#define VGA_AUX_DIV_SHIFT 29
+#define VGA_MAIN_DIV_SHIFT 26
+#define PIC_MAIN_DIV_SHIFT 23
+#define PIC_AUX_DIV_SHIFT 20
+#define VOU_CLK_VL2_SEL BIT(8)
+#define VOU_CLK_VL1_SEL BIT(7)
+#define VOU_CLK_VL0_SEL BIT(6)
#define VOU_CLK_GL1_SEL BIT(5)
#define VOU_CLK_GL0_SEL BIT(4)
+#define VOU_DIV_PARA 0x1c
+#define DIV_PARA_UPDATE BIT(31)
+#define TVENC_AUX_DIV_SHIFT 28
+#define HDMI_AUX_PNX_DIV_SHIFT 25
+#define HDMI_MAIN_PNX_DIV_SHIFT 22
+#define HDMI_AUX_DIV_SHIFT 19
+#define HDMI_MAIN_DIV_SHIFT 16
+#define TVENC_MAIN_DIV_SHIFT 13
+#define INF_AUX_DIV_SHIFT 9
+#define INF_MAIN_DIV_SHIFT 6
+#define LAYER_AUX_DIV_SHIFT 3
+#define LAYER_MAIN_DIV_SHIFT 0
#define VOU_CLK_REQEN 0x20
#define VOU_CLK_EN 0x24
+#define VOU_INF_HDMI_CTRL 0x30
+#define VOU_HDMI_AUD_MASK 0x1f
/* OTFPPU_CTRL registers */
#define OTFPPU_RSZ_DATA_SOURCE 0x04
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index c27858ae0552..eeb021fe6410 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -399,6 +399,7 @@ static int host1x_device_add(struct host1x *host1x,
dev_set_name(&device->dev, "%s", driver->driver.name);
of_dma_configure(&device->dev, host1x->dev->of_node);
device->dev.release = host1x_device_release;
+ device->dev.of_node = host1x->dev->of_node;
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 97218af4fe75..8368e6f766ee 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1238,12 +1238,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
platform_device_put(pdev);
goto err_register;
}
-
- /*
- * Set of_node only after calling platform_device_add. Otherwise
- * the platform:imx-ipuv3-crtc modalias won't be used.
- */
- pdev->dev.of_node = of_node;
}
return 0;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 63c7292f427a..24e12b87a0cb 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -544,6 +544,7 @@ void ipu_csi_set_downsize(struct ipu_csi *csi, bool horiz, bool vert)
spin_unlock_irqrestore(&csi->lock, flags);
}
+EXPORT_SYMBOL_GPL(ipu_csi_set_downsize);
void ipu_csi_set_test_generator(struct ipu_csi *csi, bool active,
u32 r_value, u32 g_value, u32 b_value,
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 5d3b0db5ce0a..922e4eaed9c5 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -138,6 +138,14 @@ config FB_SYS_IMAGEBLIT
blitting. This is used by drivers that don't provide their own
(accelerated) version and the framebuffer is in system RAM.
+config FB_PROVIDE_GET_FB_UNMAPPED_AREA
+ bool
+ depends on FB
+ default n
+ ---help---
+ Allow the generic frame-buffer code to provide the
+ get_fb_unmapped_area function.
+
menuconfig FB_FOREIGN_ENDIAN
bool "Framebuffer foreign endianness support"
depends on FB
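A driver opts in by selecting the new symbol from its own Kconfig entry; a hypothetical example:

config FB_FOO
	tristate "Foo framebuffer support"
	depends on FB
	select FB_PROVIDE_GET_FB_UNMAPPED_AREA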
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 76c1ad96fb37..069fe7960df1 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1492,6 +1492,21 @@ __releases(&info->lock)
return 0;
}
+#ifdef CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA
+unsigned long get_fb_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ struct fb_info * const info = filp->private_data;
+ unsigned long fb_size = PAGE_ALIGN(info->fix.smem_len);
+
+ if (pgoff > fb_size || len > fb_size - pgoff)
+ return -EINVAL;
+
+ return (unsigned long)info->screen_base + pgoff;
+}
+#endif
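The point of the hook, as a reading of the change rather than anything stated in the patch: on NOMMU kernels mmap() cannot fabricate new mappings, so the address returned here is the framebuffer memory itself, and userspace ends up accessing video memory in place.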
+
static const struct file_operations fb_fops = {
.owner = THIS_MODULE,
.read = fb_read,
@@ -1503,7 +1518,8 @@ static const struct file_operations fb_fops = {
.mmap = fb_mmap,
.open = fb_open,
.release = fb_release,
-#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
+#if defined(HAVE_ARCH_FB_UNMAPPED_AREA) || \
+ defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA)
.get_unmapped_area = get_fb_unmapped_area,
#endif
#ifdef CONFIG_FB_DEFERRED_IO
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index bae79f3c4d28..b080a171a23f 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -27,6 +27,16 @@ enum dw_hdmi_devtype {
RK3288_HDMI,
};
+enum dw_hdmi_phy_type {
+ DW_HDMI_PHY_DWC_HDMI_TX_PHY = 0x00,
+ DW_HDMI_PHY_DWC_MHL_PHY_HEAC = 0xb2,
+ DW_HDMI_PHY_DWC_MHL_PHY = 0xc2,
+ DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC = 0xe2,
+ DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY = 0xf2,
+ DW_HDMI_PHY_DWC_HDMI20_TX_PHY = 0xf3,
+ DW_HDMI_PHY_VENDOR_PHY = 0xfe,
+};
+
struct dw_hdmi_mpll_config {
unsigned long mpixelclock;
struct {
@@ -56,10 +66,11 @@ struct dw_hdmi_plat_data {
struct drm_display_mode *mode);
};
-void dw_hdmi_unbind(struct device *dev, struct device *master, void *data);
-int dw_hdmi_bind(struct device *dev, struct device *master,
- void *data, struct drm_encoder *encoder,
- struct resource *iores, int irq,
+int dw_hdmi_probe(struct platform_device *pdev,
+ const struct dw_hdmi_plat_data *plat_data);
+void dw_hdmi_remove(struct platform_device *pdev);
+void dw_hdmi_unbind(struct device *dev);
+int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
const struct dw_hdmi_plat_data *plat_data);
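The component glue in platform drivers shrinks accordingly; a hypothetical binding (all names invented) against the new signature might look like:

struct example_hdmi {
	struct drm_encoder encoder;
};

/* All fields (mpll_cfg, mode_valid, ...) filled in by the real driver */
static const struct dw_hdmi_plat_data example_plat_data;

static int example_hdmi_bind(struct device *dev, struct device *master,
			     void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct example_hdmi *hdmi = dev_get_drvdata(dev);

	/* Resources and the IRQ are now looked up by dw_hdmi itself */
	return dw_hdmi_bind(pdev, &hdmi->encoder, &example_plat_data);
}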
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
index 3629b2734db6..fbdfc8d7f3c7 100644
--- a/include/drm/bridge/mhl.h
+++ b/include/drm/bridge/mhl.h
@@ -15,6 +15,8 @@
#ifndef __MHL_H__
#define __MHL_H__
+#include <linux/types.h>
+
/* Device Capabilities Registers */
enum {
MHL_DCAP_DEV_STATE,
@@ -288,4 +290,87 @@ enum {
/* Unsupported/unrecognized key code */
#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
+enum mhl_burst_id {
+ MHL_BURST_ID_3D_VIC = 0x10,
+ MHL_BURST_ID_3D_DTD = 0x11,
+ MHL_BURST_ID_HEV_VIC = 0x20,
+ MHL_BURST_ID_HEV_DTDA = 0x21,
+ MHL_BURST_ID_HEV_DTDB = 0x22,
+ MHL_BURST_ID_VC_ASSIGN = 0x38,
+ MHL_BURST_ID_VC_CONFIRM = 0x39,
+ MHL_BURST_ID_AUD_DELAY = 0x40,
+ MHL_BURST_ID_ADT_BURSTID = 0x41,
+ MHL_BURST_ID_BIST_SETUP = 0x51,
+ MHL_BURST_ID_BIST_RETURN_STAT = 0x52,
+ MHL_BURST_ID_EMSC_SUPPORT = 0x61,
+ MHL_BURST_ID_HID_PAYLOAD = 0x62,
+ MHL_BURST_ID_BLK_RCV_BUFFER_INFO = 0x63,
+ MHL_BURST_ID_BITS_PER_PIXEL_FMT = 0x64,
+};
+
+struct mhl_burst_blk_rcv_buffer_info {
+ __be16 id;
+ __le16 size;
+} __packed;
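The mixed endianness follows the declaration above (big-endian burst id on the wire, little-endian size); a fill sketch with a hypothetical helper name:

static void example_fill_buffer_info(struct mhl_burst_blk_rcv_buffer_info *info,
				     u16 size)
{
	info->id = cpu_to_be16(MHL_BURST_ID_BLK_RCV_BUFFER_INFO);
	info->size = cpu_to_le16(size);
}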
+
+struct mhl3_burst_header {
+ __be16 id;
+ u8 checksum;
+ u8 total_entries;
+ u8 sequence_index;
+} __packed;
+
+struct mhl_burst_bits_per_pixel_fmt {
+ struct mhl3_burst_header hdr;
+ u8 num_entries;
+ struct {
+ u8 stream_id;
+ u8 pixel_format;
+ } __packed desc[0];
+} __packed;
+
+struct mhl_burst_emsc_support {
+ struct mhl3_burst_header hdr;
+ u8 num_entries;
+ __be16 burst_id[0];
+} __packed;
+
+struct mhl_burst_audio_descr {
+ struct mhl3_burst_header hdr;
+ u8 flags;
+ u8 short_desc[9];
+} __packed;
+
+/*
+ * MHL3 infoframe related definitions
+ */
+
+#define MHL3_IEEE_OUI 0x7ca61d
+#define MHL3_INFOFRAME_SIZE 15
+
+enum mhl3_video_format {
+ MHL3_VIDEO_FORMAT_NONE,
+ MHL3_VIDEO_FORMAT_3D,
+ MHL3_VIDEO_FORMAT_MULTI_VIEW,
+ MHL3_VIDEO_FORMAT_DUAL_3D
+};
+
+enum mhl3_3d_format_type {
+ MHL3_3D_FORMAT_TYPE_FS, /* frame sequential */
+ MHL3_3D_FORMAT_TYPE_TB, /* top-bottom */
+ MHL3_3D_FORMAT_TYPE_LR, /* left-right */
+ MHL3_3D_FORMAT_TYPE_FS_TB, /* frame sequential, top-bottom */
+ MHL3_3D_FORMAT_TYPE_FS_LR, /* frame sequential, left-right */
+ MHL3_3D_FORMAT_TYPE_TB_LR /* top-bottom, left-right */
+};
+
+struct mhl3_infoframe {
+ unsigned char version;
+ enum mhl3_video_format video_format;
+ enum mhl3_3d_format_type format_type;
+ bool sep_audio;
+ int hev_format;
+ int av_delay;
+};
+
#endif /* __MHL_H__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 9c4ee144b5f6..6105c050d7bc 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -360,6 +360,7 @@ struct drm_ioctl_desc {
/* Event queued up for userspace to read */
struct drm_pending_event {
struct completion *completion;
+ void (*completion_release)(struct completion *completion);
struct drm_event *event;
struct dma_fence *fence;
struct list_head link;
@@ -635,6 +636,19 @@ struct drm_device {
int switch_power_state;
};
+/**
+ * drm_drv_uses_atomic_modeset - check if the driver implements
+ * atomic_commit()
+ * @dev: DRM device
+ *
+ * This check is useful if drivers do not have DRIVER_ATOMIC set but
+ * implement atomic modesetting internally.
+ */
+static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
+{
+ return dev->mode_config.funcs->atomic_commit != NULL;
+}
+
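A hedged usage sketch (hypothetical caller; this is the discrimination the helper is meant for):

static void example_restore_fbdev(struct drm_device *dev)
{
	if (drm_drv_uses_atomic_modeset(dev)) {
		/* take the atomic commit path */
	} else {
		/* fall back to the legacy CRTC helpers */
	}
}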
#include <drm/drm_irq.h>
#define DRM_SWITCH_POWER_ON 0
@@ -718,11 +732,6 @@ int drm_noop(struct drm_device *dev, void *data,
int drm_invalid_op(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* Cache management (drm_cache.c) */
-void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
-void drm_clflush_sg(struct sg_table *st);
-void drm_clflush_virt_range(void *addr, unsigned long length);
-
/*
* These are exported to drivers so that they can implement fencing using
* DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
@@ -782,21 +791,6 @@ extern void drm_sysfs_hotplug_event(struct drm_device *dev);
/*@}*/
-/* PCI section */
-static __inline__ int drm_pci_device_is_agp(struct drm_device *dev)
-{
- if (dev->driver->device_is_agp != NULL) {
- int err = (*dev->driver->device_is_agp) (dev);
-
- if (err != 2) {
- return err;
- }
- }
-
- return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
-}
-void drm_pci_agp_destroy(struct drm_device *dev);
-
extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
#ifdef CONFIG_PCI
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 56814e8ae7ea..052ab161b239 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -123,7 +123,8 @@ struct drm_crtc_commit {
/**
* @commit_entry:
*
- * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
+ * Entry on the per-CRTC &drm_crtc.commit_list. Protected by
+ * &drm_crtc.commit_lock.
*/
struct list_head commit_entry;
@@ -145,6 +146,7 @@ struct __drm_crtcs_state {
struct drm_crtc_state *state;
struct drm_crtc_commit *commit;
s32 __user *out_fence_ptr;
+ unsigned last_vblank_count;
};
struct __drm_connnectors_state {
@@ -188,12 +190,31 @@ struct drm_atomic_state {
struct work_struct commit_work;
};
-void drm_crtc_commit_put(struct drm_crtc_commit *commit);
+void __drm_crtc_commit_free(struct kref *kref);
+
+/**
+ * drm_crtc_commit_get - acquire a reference to the CRTC commit
+ * @commit: CRTC commit
+ *
+ * Increases the reference of @commit.
+ */
static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
{
kref_get(&commit->ref);
}
+/**
+ * drm_crtc_commit_put - release a reference to the CRTC commit
+ * @commit: CRTC commit
+ *
+ * This releases a reference to @commit which is freed after removing the
+ * final reference. No locking required and callable from any context.
+ */
+static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit)
+{
+ kref_put(&commit->ref, __drm_crtc_commit_free);
+}
+
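With get/put exported, the usual pattern is (sketch; the wait in the middle stands in for whatever completion the driver tracks):

static void example_track_commit(struct drm_crtc_commit *commit)
{
	drm_crtc_commit_get(commit);

	/* ... hand @commit to a worker and wait for it to signal ... */

	drm_crtc_commit_put(commit);	/* frees on the final reference */
}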
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
@@ -369,12 +390,6 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
-#ifdef CONFIG_DEBUG_FS
-struct drm_minor;
-int drm_atomic_debugfs_init(struct drm_minor *minor);
-int drm_atomic_debugfs_cleanup(struct drm_minor *minor);
-#endif
-
#define for_each_connector_in_state(__state, connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_connector && \
@@ -403,7 +418,7 @@ int drm_atomic_debugfs_cleanup(struct drm_minor *minor);
* drm_atomic_crtc_needs_modeset - compute combined modeset need
* @state: &drm_crtc_state for the CRTC
*
- * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
+ * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track
* whether the state CRTC changed enough to need a full modeset cycle:
* connectors_changed, mode_changed and active_changed. This helper simply
* combines these three to compute the overall need for a modeset for @state.
@@ -415,7 +430,8 @@ int drm_atomic_debugfs_cleanup(struct drm_minor *minor);
*
* For example if the CRTC mode has changed, and the hardware is able to enact
* the requested mode change without going through a full modeset, the driver
- * should clear mode_changed during its ->atomic_check.
+ * should clear mode_changed in its &drm_mode_config_funcs.atomic_check
+ * implementation.
*/
static inline bool
drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
@@ -424,5 +440,4 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
state->connectors_changed;
}
-
#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 7ff92b09fd9c..d066e9491ae3 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -48,9 +48,6 @@ int drm_atomic_helper_commit(struct drm_device *dev,
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state,
bool pre_swap);
-bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
- struct drm_atomic_state *old_state,
- struct drm_crtc *crtc);
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);
@@ -124,6 +121,12 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags);
+int drm_atomic_helper_page_flip_target(
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags,
+ uint32_t target);
int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
int mode);
struct drm_encoder *
@@ -174,7 +177,8 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
*
* This iterates over the current state, useful (for example) when applying
* atomic state after it has been checked and swapped. To iterate over the
- * planes which *will* be attached (for ->atomic_check()) see
+ * planes which *will* be attached (more useful in code called from
+ * &drm_mode_config_funcs.atomic_check) see
* drm_atomic_crtc_state_for_each_plane().
*/
#define drm_atomic_crtc_for_each_plane(plane, crtc) \
@@ -186,8 +190,9 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
* @crtc_state: the incoming crtc-state
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
- * attached if the specified state is applied. Useful during (for example)
- * ->atomic_check() operations, to validate the incoming state.
+ * attached if the specified state is applied. Useful, for example, in
+ * code called from &drm_mode_config_funcs.atomic_check operations, to
+ * validate the incoming state.
*/
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
@@ -199,8 +204,9 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
* @crtc_state: the incoming crtc-state
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
- * attached if the specified state is applied. Useful during (for example)
- * ->atomic_check() operations, to validate the incoming state.
+ * attached if the specified state is applied. Useful, for example, in
+ * code called from &drm_mode_config_funcs.atomic_check operations, to
+ * validate the incoming state.
*
* Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
* const plane_state. This is useful when a driver just wants to peek at other
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index 610223b0481b..1eb4a52cad8d 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -33,10 +33,7 @@
*
* @refcount: Refcount for this master object.
* @dev: Link back to the DRM device
- * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
- * @unique_len: Length of unique field. Protected by drm_global_mutex.
- * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
- * @lock: DRI lock information.
+ * @lock: DRI1 lock information.
* @driver_priv: Pointer to driver-private information.
*
* Note that master structures are only relevant for the legacy/primary device
@@ -45,8 +42,20 @@
struct drm_master {
struct kref refcount;
struct drm_device *dev;
+ /**
+ * @unique: Unique identifier: e.g. busid. Protected by
+ * &drm_device.master_mutex.
+ */
char *unique;
+ /**
+ * @unique_len: Length of unique field. Protected by
+ * &drm_device.master_mutex.
+ */
int unique_len;
+ /**
+ * @magic_map: Map of used authentication tokens. Protected by
+ * &drm_device.master_mutex.
+ */
struct idr magic_map;
struct drm_lock_data lock;
void *driver_priv;
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 530a1d6e8cde..fdd82fcbf168 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -96,9 +96,10 @@ struct drm_bridge_funcs {
* This callback should disable the bridge. It is called right before
* the preceding element in the display pipe is disabled. If the
* preceding element is a bridge this means it's called before that
- * bridge's ->disable() function. If the preceding element is a
- * &drm_encoder it's called right before the encoder's ->disable(),
- * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+ * bridge's @disable vfunc. If the preceding element is a &drm_encoder
+ * it's called right before the &drm_encoder_helper_funcs.disable,
+ * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
+ * hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
@@ -110,12 +111,13 @@ struct drm_bridge_funcs {
/**
* @post_disable:
*
- * This callback should disable the bridge. It is called right after
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's ->post_disable() function. If the preceding element is a
- * &drm_encoder it's called right after the encoder's ->disable(),
- * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+ * This callback should disable the bridge. It is called right after the
+ * preceding element in the display pipe is disabled. If the preceding
+ * element is a bridge this means it's called after that bridge's
+ * @post_disable function. If the preceding element is a &drm_encoder
+ * it's called right after the encoder's
+ * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
+ * or &drm_encoder_helper_funcs.dpms hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
* signals) feeding it is no longer running when this callback is
@@ -129,9 +131,11 @@ struct drm_bridge_funcs {
* @mode_set:
*
* This callback should set the given mode on the bridge. It is called
- * after the ->mode_set() callback for the preceding element in the
- * display pipeline has been called already. The display pipe (i.e.
- * clocks and timing signals) is off when this function is called.
+ * after the @mode_set callback for the preceding element in the display
+ * pipeline has been called already. If the bridge is the first element
+ * then this would be &drm_encoder_helper_funcs.mode_set. The display
+ * pipe (i.e. clocks and timing signals) is off when this function is
+ * called.
*/
void (*mode_set)(struct drm_bridge *bridge,
struct drm_display_mode *mode,
@@ -142,9 +146,10 @@ struct drm_bridge_funcs {
* This callback should enable the bridge. It is called right before
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called before that
- * bridge's ->pre_enable() function. If the preceding element is a
- * &drm_encoder it's called right before the encoder's ->enable(),
- * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+ * bridge's @pre_enable function. If the preceding element is a
+ * &drm_encoder it's called right before the encoder's
+ * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
+ * &drm_encoder_helper_funcs.dpms hook.
*
* The display pipe (i.e. clocks and timing signals) feeding this bridge
* will not yet be running when this callback is called. The bridge must
@@ -161,9 +166,10 @@ struct drm_bridge_funcs {
* This callback should enable the bridge. It is called right after
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called after that
- * bridge's ->enable() function. If the preceding element is a
- * &drm_encoder it's called right after the encoder's ->enable(),
- * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+ * bridge's @enable function. If the preceding element is a
+ * &drm_encoder it's called right after the encoder's
+ * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
+ * &drm_encoder_helper_funcs.dpms hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
@@ -201,8 +207,8 @@ struct drm_bridge {
int drm_bridge_add(struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
-int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
-void drm_bridge_detach(struct drm_bridge *bridge);
+int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
+ struct drm_bridge *previous);
bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index cebecff536a3..beab0f0d0cfb 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -33,7 +33,11 @@
#ifndef _DRM_CACHE_H_
#define _DRM_CACHE_H_
+#include <linux/scatterlist.h>
+
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+void drm_clflush_sg(struct sg_table *st);
+void drm_clflush_virt_range(void *addr, unsigned long length);
static inline bool drm_arch_can_wc_memory(void)
{
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index c767238ac9d5..bce4a532836d 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -25,6 +25,8 @@
#include <linux/ctype.h>
+uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
+
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
bool has_ctm,
@@ -33,29 +35,4 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
-/**
- * drm_color_lut_extract - clamp&round LUT entries
- * @user_input: input value
- * @bit_precision: number of bits the hw LUT supports
- *
- * Extract a degamma/gamma LUT value provided by user (in the form of
- * &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
- */
-static inline uint32_t drm_color_lut_extract(uint32_t user_input,
- uint32_t bit_precision)
-{
- uint32_t val = user_input;
- uint32_t max = 0xffff >> (16 - bit_precision);
-
- /* Round only if we're not using full precision. */
- if (bit_precision < 16) {
- val += 1UL << (16 - bit_precision - 1);
- val >>= 16 - bit_precision;
- }
-
- return clamp_val(val, 0, max);
-}
-
-
#endif
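The now out-of-line helper keeps the clamp-and-round behaviour of the removed inline; a worked example following that math:

/*
 * Worked example (illustrative): for bit_precision = 10,
 * max = 0xffff >> 6 = 0x3ff and the rounding term is 1 << 5 = 0x20,
 * so drm_color_lut_extract(0x8000, 10) yields
 * (0x8000 + 0x20) >> 6 = 0x200, within the clamp.
 */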
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 045a97cbeba2..e5e1eddd19fb 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -94,7 +94,7 @@ enum subpixel_order {
*
* Describes a given display (e.g. CRT or flat panel) and its limitations. For
* fixed display sinks like built-in panels there's not much difference between
- * this and struct &drm_connector. But for sinks with a real cable this
+ * this and &struct drm_connector. But for sinks with a real cable this
* structure is meant to describe all the things at the other end of the cable.
*
* For sinks which provide an EDID this can be filled out by calling
@@ -117,7 +117,7 @@ struct drm_display_info {
/**
* @pixel_clock: Maximum pixel clock supported by the sink, in units of
- * 100Hz. This mismatches the clok in &drm_display_mode (which is in
+ * 100Hz. This mismatches the clock in &drm_display_mode (which is in
* kHz), because that's what the EDID uses as its base unit.
*/
unsigned int pixel_clock;
@@ -331,15 +331,15 @@ struct drm_connector_funcs {
*
* Entry point for output detection and basic mode validation. The
* driver should reprobe the output if needed (e.g. when hotplug
- * handling is unreliable), add all detected modes to connector->modes
+ * handling is unreliable), add all detected modes to &drm_connector.modes
* and filter out any the device can't support in any configuration. It
* also needs to filter out any modes wider or higher than the
* parameters max_width and max_height indicate.
*
* The drivers must also prune any modes no longer valid from
- * connector->modes. Furthermore it must update connector->status and
- * connector->edid. If no EDID has been received for this output
- * connector->edid must be NULL.
+ * &drm_connector.modes. Furthermore it must update
+ * &drm_connector.status and &drm_connector.edid. If no EDID has been
+ * received for this output, &drm_connector.edid must be NULL.
*
* Drivers using the probe helpers should use
* drm_helper_probe_single_connector_modes() or
@@ -348,7 +348,7 @@ struct drm_connector_funcs {
*
* RETURNS:
*
- * The number of modes detected and filled into connector->modes.
+ * The number of modes detected and filled into &drm_connector.modes.
*/
int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
@@ -381,7 +381,7 @@ struct drm_connector_funcs {
* core drm connector interfaces. Everything added from this callback
* should be unregistered in the early_unregister callback.
*
- * This is called while holding drm_connector->mutex.
+ * This is called while holding &drm_connector.mutex.
*
* Returns:
*
@@ -398,7 +398,7 @@ struct drm_connector_funcs {
* early in the driver unload sequence to disable userspace access
* before data structures are torn down.
*
- * This is called while holding drm_connector->mutex.
+ * This is called while holding &drm_connector.mutex.
*/
void (*early_unregister)(struct drm_connector *connector);
@@ -418,17 +418,17 @@ struct drm_connector_funcs {
* Duplicate the current atomic state for this connector and return it.
* The core and helpers guarantee that any atomic state duplicated with
* this hook and still owned by the caller (i.e. not transferred to the
- * driver by calling ->atomic_commit() from struct
- * &drm_mode_config_funcs) will be cleaned up by calling the
- * @atomic_destroy_state hook in this structure.
+ * driver by calling &drm_mode_config_funcs.atomic_commit) will be
+ * cleaned up by calling the @atomic_destroy_state hook in this
+ * structure.
*
- * Atomic drivers which don't subclass struct &drm_connector_state should use
+ * Atomic drivers which don't subclass &struct drm_connector_state should use
* drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
* __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
* duplicated in a consistent fashion across drivers.
*
- * It is an error to call this hook before connector->state has been
+ * It is an error to call this hook before &drm_connector.state has been
* initialized correctly.
*
* NOTE:
@@ -525,7 +525,7 @@ struct drm_connector_funcs {
/**
* @atomic_print_state:
*
- * If driver subclasses struct &drm_connector_state, it should implement
+ * If driver subclasses &struct drm_connector_state, it should implement
* this optional hook for printing additional driver specific state.
*
* Do not call this directly, use drm_atomic_connector_print_state()
@@ -563,9 +563,6 @@ struct drm_cmdline_mode {
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
* @stereo_allowed: can this connector handle stereo modes?
- * @modes: modes available on this connector (from fill_modes() + user)
- * @status: one of the drm_connector_status enums (connected, not, or unknown)
- * @probed_modes: list of modes derived directly from the display
* @funcs: connector control functions
* @edid_blob_ptr: DRM property containing EDID if present
* @properties: property tracking for this connector
@@ -612,8 +609,8 @@ struct drm_connector {
/**
* @mutex: Lock for general connector state, but currently only protects
- * @registered. Most of the connector state is still protected by the
- * mutex in &drm_mode_config.
+ * @registered. Most of the connector state is still protected by
+ * &drm_mode_config.mutex.
*/
struct mutex mutex;
@@ -635,19 +632,37 @@ struct drm_connector {
* Protected by @mutex.
*/
bool registered;
- struct list_head modes; /* list of modes on this connector */
+ /**
+ * @modes:
+ * Modes available on this connector (from fill_modes() + user).
+ * Protected by &drm_mode_config.mutex.
+ */
+ struct list_head modes;
+
+ /**
+ * @status:
+ * One of the drm_connector_status enums (connected, not, or unknown).
+ * Protected by &drm_mode_config.mutex.
+ */
enum drm_connector_status status;
- /* these are modes added by probing with DDC or the BIOS */
+ /**
+ * @probed_modes:
+ * These are modes added by probing with DDC or the BIOS, before
+ * filtering is applied. Used by the probe helpers. Protected by
+ * &drm_mode_config.mutex.
+ */
struct list_head probed_modes;
/**
* @display_info: Display information is filled from EDID information
* when a display is detected. For non hot-pluggable displays such as
* flat panels in embedded systems, the driver should initialize the
- * display_info.width_mm and display_info.height_mm fields with the
- * physical size of the display.
+ * &drm_display_info.width_mm and &drm_display_info.height_mm fields
+ * with the physical size of the display.
+ *
+ * Protected by &drm_mode_config.mutex.
*/
struct drm_display_info display_info;
const struct drm_connector_funcs *funcs;
@@ -853,12 +868,46 @@ void drm_mode_put_tile_group(struct drm_device *dev,
* @dev: the DRM device
*
* Iterate over all connectors of @dev.
+ *
+ * WARNING:
+ *
+ * This iterator is not safe against hotadd/removal of connectors and is
+ * deprecated. Use drm_for_each_connector_iter() instead.
*/
#define drm_for_each_connector(connector, dev) \
- for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
- connector = list_first_entry(&(dev)->mode_config.connector_list, \
- struct drm_connector, head); \
- &connector->head != (&(dev)->mode_config.connector_list); \
- connector = list_next_entry(connector, head))
+ list_for_each_entry(connector, &(dev)->mode_config.connector_list, head)
+
+/**
+ * struct drm_connector_list_iter - connector_list iterator
+ *
+ * This iterator tracks state needed to be able to walk the connector_list
+ * within struct drm_mode_config. Only use together with
+ * drm_connector_list_iter_get(), drm_connector_list_iter_put() and
+ * drm_connector_list_iter_next(), or with the convenience macro
+ * drm_for_each_connector_iter().
+ */
+struct drm_connector_list_iter {
+/* private: */
+ struct drm_device *dev;
+ struct drm_connector *conn;
+};
+
+void drm_connector_list_iter_get(struct drm_device *dev,
+ struct drm_connector_list_iter *iter);
+struct drm_connector *
+drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
+void drm_connector_list_iter_put(struct drm_connector_list_iter *iter);
+
+/**
+ * drm_for_each_connector_iter - connector_list iterator macro
+ * @connector: &struct drm_connector pointer used as cursor
+ * @iter: &struct drm_connector_list_iter
+ *
+ * Note that @connector is only valid within the list body, if you want to use
+ * @connector after calling drm_connector_list_iter_put() then you need to grab
+ * your own reference first using drm_connector_reference().
+ */
+#define drm_for_each_connector_iter(connector, iter) \
+ while ((connector = drm_connector_list_iter_next(iter)))
#endif
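The new iterator is used like this (sketch built strictly from the declarations above; the function name is invented):

static void example_walk_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter iter;
	struct drm_connector *connector;

	drm_connector_list_iter_get(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* @connector is only valid inside the loop body */
	}
	drm_connector_list_iter_put(&iter);
}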
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 946672f97e1e..8f0b195e4a59 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -39,7 +39,6 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
#include <drm/drm_connector.h>
-#include <drm/drm_encoder.h>
#include <drm/drm_property.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -68,14 +67,12 @@ static inline uint64_t I642U64(int64_t val)
}
struct drm_crtc;
-struct drm_encoder;
struct drm_pending_vblank_event;
struct drm_plane;
struct drm_bridge;
struct drm_atomic_state;
struct drm_crtc_helper_funcs;
-struct drm_encoder_helper_funcs;
struct drm_plane_helper_funcs;
/**
@@ -84,8 +81,8 @@ struct drm_plane_helper_funcs;
* @enable: whether the CRTC should be enabled, gates all other state
* @active: whether the CRTC is actively displaying (used for DPMS)
* @planes_changed: planes on this crtc are updated
- * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
- * @active_changed: crtc_state->active has been toggled.
+ * @mode_changed: @mode or @enable has been changed
+ * @active_changed: @active has been toggled.
* @connectors_changed: connectors to this crtc have been updated
* @zpos_changed: zpos values of planes on this crtc have been updated
* @color_mgmt_changed: color management properties have changed (degamma or
@@ -93,8 +90,6 @@ struct drm_plane_helper_funcs;
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
* @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
- * @last_vblank_count: for helpers and drivers to capture the vblank of the
- * update to ensure framebuffer cleanup isn't done too early
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
* @mode_blob: &drm_property_blob for @mode
@@ -107,9 +102,10 @@ struct drm_plane_helper_funcs;
*
* Note that the distinction between @enable and @active is rather subtle:
* Flipping @active while @enable is set without changing anything else may
- * never return in a failure from the ->atomic_check callback. Userspace assumes
- * that a DPMS On will always succeed. In other words: @enable controls resource
- * assignment, @active controls the actual hardware state.
+ * never return in a failure from the &drm_mode_config_funcs.atomic_check
+ * callback. Userspace assumes that a DPMS On will always succeed. In other
+ * words: @enable controls resource assignment, @active controls the actual
+ * hardware state.
*
* The three booleans active_changed, connectors_changed and mode_changed are
* intended to indicate whether a full modeset is needed, rather than strictly
@@ -140,9 +136,6 @@ struct drm_crtc_state {
u32 connector_mask;
u32 encoder_mask;
- /* last_vblank_count: for vblank waits before cleanup */
- u32 last_vblank_count;
-
/* adjusted_mode: for use by helpers and drivers */
struct drm_display_mode adjusted_mode;
@@ -157,6 +150,15 @@ struct drm_crtc_state {
struct drm_property_blob *gamma_lut;
/**
+ * @target_vblank:
+ *
+ * Target vertical blank period when a page flip
+ * should take effect.
+ */
+
+ u32 target_vblank;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -323,7 +325,7 @@ struct drm_crtc_funcs {
*
* This is the main legacy entry point to change the modeset state on a
* CRTC. All the details of the desired configuration are passed in a
- * struct &drm_mode_set - see there for details.
+ * &struct drm_mode_set - see there for details.
*
* Drivers implementing atomic modeset should use
* drm_atomic_helper_set_config() to implement this hook.
@@ -345,8 +347,8 @@ struct drm_crtc_funcs {
* through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application
* requests a page flip the DRM core verifies that the new frame buffer
* is large enough to be scanned out by the CRTC in the currently
- * configured mode and then calls the CRTC ->page_flip() operation with a
- * pointer to the new frame buffer.
+ * configured mode and then calls this hook with a pointer to the new
+ * frame buffer.
*
* The driver must wait for any pending rendering to the new framebuffer
* to complete before executing the flip. It should also wait for any
@@ -354,7 +356,7 @@ struct drm_crtc_funcs {
* shared dma-buf.
*
* An application can request to be notified when the page flip has
- * completed. The drm core will supply a struct &drm_event in the event
+ * completed. The drm core will supply a &struct drm_event in the event
* parameter in this case. This can be handled by the
* drm_crtc_send_vblank_event() function, which the driver should call on
* the provided event upon completion of the flip. Note that if
@@ -381,7 +383,7 @@ struct drm_crtc_funcs {
* RETURNS:
*
* 0 on success or a negative error code on failure. Note that if a
- * ->page_flip() operation is already pending the callback should return
+ * page flip operation is already pending the callback should return
* -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode
* or just runtime disabled through DPMS respectively the new atomic
* "ACTIVE" state) should result in an -EINVAL error code. Note that
@@ -433,19 +435,19 @@ struct drm_crtc_funcs {
* @atomic_duplicate_state:
*
* Duplicate the current atomic state for this CRTC and return it.
- * The core and helpers gurantee that any atomic state duplicated with
+ * The core and helpers guarantee that any atomic state duplicated with
* this hook and still owned by the caller (i.e. not transferred to the
- * driver by calling ->atomic_commit() from struct
- * &drm_mode_config_funcs) will be cleaned up by calling the
- * @atomic_destroy_state hook in this structure.
+ * driver by calling &drm_mode_config_funcs.atomic_commit) will be
+ * cleaned up by calling the @atomic_destroy_state hook in this
+ * structure.
*
- * Atomic drivers which don't subclass struct &drm_crtc should use
+ * Atomic drivers which don't subclass &struct drm_crtc_state should use
* drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
* __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is
* duplicated in a consistent fashion across drivers.
*
- * It is an error to call this hook before crtc->state has been
+ * It is an error to call this hook before &drm_crtc.state has been
* initialized correctly.
*
* NOTE:
@@ -558,7 +560,7 @@ struct drm_crtc_funcs {
*
* This optional hook should be used to unregister the additional
* userspace interfaces attached to the crtc from
- * late_unregister(). It is called from drm_dev_unregister(),
+ * @late_register. It is called from drm_dev_unregister(),
* early in the driver unload sequence to disable userspace access
* before data structures are torn down.
*/
@@ -591,7 +593,7 @@ struct drm_crtc_funcs {
/**
* @atomic_print_state:
*
- * If driver subclasses struct &drm_crtc_state, it should implement
+ * If driver subclasses &struct drm_crtc_state, it should implement
* this optional hook for printing additional driver specific state.
*
* Do not call this directly, use drm_atomic_crtc_print_state()
@@ -639,8 +641,8 @@ struct drm_crtc {
*
* This provides a read lock for the overall crtc state (mode, dpms
* state, ...) and a write lock for everything which can be updated
- * without a full modeset (fb, cursor data, crtc properties ...). Full
- * modeset also need to grab dev->mode_config.connection_mutex.
+ * without a full modeset (fb, cursor data, crtc properties ...). A full
+ * modeset also needs to grab &drm_mode_config.connection_mutex.
*/
struct drm_modeset_lock mutex;
@@ -772,10 +774,8 @@ struct drm_crtc {
* @connectors: array of connectors to drive with this CRTC if possible
* @num_connectors: size of @connectors array
*
- * Represents a single crtc the connectors that it drives with what mode
- * and from which framebuffer it scans out from.
- *
- * This is used to set modes.
+ * This represents a modeset configuration for the legacy SETCRTC ioctl and is
+ * also used internally. Atomic drivers instead use &drm_atomic_state.
*/
struct drm_mode_set {
struct drm_framebuffer *fb;
@@ -824,14 +824,21 @@ static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
return 1 << drm_crtc_index(crtc);
}
-void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
- int *hdisplay, int *vdisplay);
int drm_crtc_force_disable(struct drm_crtc *crtc);
int drm_crtc_force_disable_all(struct drm_device *dev);
int drm_mode_set_config_internal(struct drm_mode_set *set);
+struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx);
-/* Helpers */
+/**
+ * drm_crtc_find - look up a CRTC object from its ID
+ * @dev: DRM device
+ * @id: &drm_mode_object ID
+ *
+ * This can be used to look up a CRTC from its userspace ID. Only used by
+ * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS
+ * userspace interface should be done using &drm_property.
+ */
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
uint32_t id)
{
@@ -840,21 +847,14 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
return mo ? obj_to_crtc(mo) : NULL;
}
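A hedged sketch of the lookup pattern this helper serves; the foo_* names
and the ioctl payload are invented for illustration only:

struct foo_args {
	uint32_t crtc_id;	/* object ID supplied by userspace */
};

static int foo_legacy_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct foo_args *args = data;
	struct drm_crtc *crtc;

	/* Resolve the userspace ID; NULL means no such CRTC exists. */
	crtc = drm_crtc_find(dev, args->crtc_id);
	if (!crtc)
		return -ENOENT;

	/* ... operate on the looked-up CRTC ... */
	return 0;
}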
+/**
+ * drm_for_each_crtc - iterate over all CRTCs
+ * @crtc: a &struct drm_crtc as the loop cursor
+ * @dev: the &struct drm_device
+ *
+ * Iterate over all CRTCs of @dev.
+ */
#define drm_for_each_crtc(crtc, dev) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
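A minimal usage sketch, assuming the caller already holds the locking the
CRTC list requires:

static unsigned int foo_count_crtcs(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	unsigned int count = 0;

	drm_for_each_crtc(crtc, dev)
		count++;

	return count;
}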
-static inline void
-assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
-{
- /*
- * The connector hotadd/remove code currently grabs both locks when
- * updating lists. Hence readers need only hold either of them to be
- * safe and the check amounts to
- *
- * WARN_ON(not_holding(A) && not_holding(B)).
- */
- WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
- !drm_modeset_is_locked(&mode_config->connection_mutex));
-}
-
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 982c299e435a..d026f5017c33 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -73,6 +73,5 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
-extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 55bbeb0ff594..04681359a6f5 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -194,7 +194,8 @@
# define DP_PSR_SETUP_TIME_0 (6 << 1)
# define DP_PSR_SETUP_TIME_MASK (7 << 1)
# define DP_PSR_SETUP_TIME_SHIFT 1
-
+# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */
+# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */
/*
* 0x80-0x8f describe downstream port capabilities, but there are two layouts
* based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
@@ -568,6 +569,16 @@
#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
+#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */
+# define DP_GTC_CAP (1 << 0) /* DP 1.3 */
+# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */
+# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */
+# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */
+# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */
+# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */
+# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
+# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 003207670597..f4b4d154b98e 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -414,7 +414,7 @@ struct drm_dp_mst_topology_mgr {
/**
* @dev: device pointer for adding i2c devices etc.
*/
- struct device *dev;
+ struct drm_device *dev;
/**
* @cbs: callbacks for connector addition and destruction.
*/
@@ -493,8 +493,8 @@ struct drm_dp_mst_topology_mgr {
int total_pbn;
/**
- * @qlock: protects @tx_msg_downq, the tx_slots in struct
- * &drm_dp_mst_branch and txmsg->state once they are queued
+ * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.tx_slots and
+ * &drm_dp_sideband_msg_tx.state once they are queued
*/
struct mutex qlock;
/**
@@ -508,8 +508,7 @@ struct drm_dp_mst_topology_mgr {
struct mutex payload_lock;
/**
* @proposed_vcpis: Array of pointers for the new VCPI allocation. The
- * VCPI structure itself is embedded into the corresponding
- * &drm_dp_mst_port structure.
+ * VCPI structure itself is &drm_dp_mst_port.vcpi.
*/
struct drm_dp_vcpi **proposed_vcpis;
/**
@@ -556,7 +555,10 @@ struct drm_dp_mst_topology_mgr {
struct work_struct destroy_connector_work;
};
-int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_device *dev, struct drm_dp_aux *aux,
+ int max_dpcd_transaction_bytes,
+ int max_payloads, int conn_base_id);
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
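A hedged sketch of a call site updated for the new &struct drm_device based
signature; the foo_priv container and the transaction/payload limits are
example values, not taken from a real driver:

struct foo_priv {
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct drm_dp_aux aux;
};

static int foo_mst_init(struct foo_priv *priv, struct drm_device *dev,
			int conn_base_id)
{
	/* 16-byte DPCD transactions, up to 6 payload streams */
	return drm_dp_mst_topology_mgr_init(&priv->mst_mgr, dev, &priv->aux,
					    16, 6, conn_base_id);
}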
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index c4fc49583dc0..5699f42195fe 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -64,16 +64,55 @@ struct drm_mode_create_dumb;
* structure for GEM drivers.
*/
struct drm_driver {
+
+ /**
+ * @load:
+ *
+ * Backward-compatible driver callback to complete
+ * initialization steps after the driver is registered. For
+ * this reason it may suffer from race conditions, and its use is
+ * deprecated for new drivers. It is therefore only supported
+ * for existing drivers not yet converted to the new scheme.
+ * See drm_dev_init() and drm_dev_register() for the proper and
+ * race-free way to set up a &struct drm_device.
+ *
+ * Returns:
+ *
+ * Zero on success, non-zero value on failure.
+ */
int (*load) (struct drm_device *, unsigned long flags);
- int (*firstopen) (struct drm_device *);
int (*open) (struct drm_device *, struct drm_file *);
void (*preclose) (struct drm_device *, struct drm_file *file_priv);
void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
- int (*unload) (struct drm_device *);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
+
+ /**
+ * @unload:
+ *
+ * Reverse the effects of the driver load callback. Ideally,
+ * the clean up performed by the driver should happen in the
+ * reverse order of the initialization. Similarly to the load
+ * hook, this handler is deprecated and its usage should be
+ * dropped in favor of an open-coded teardown function at the
+ * driver layer. See drm_dev_unregister() and drm_dev_unref()
+ * for the proper way to remove a &struct drm_device.
+ *
+ * The @unload hook is called right after unregistering
+ * the device.
+ */
+ void (*unload) (struct drm_device *);
+
+ /**
+ * @release:
+ *
+ * Optional callback for destroying device data after the final
+ * reference is released, i.e. the device is being destroyed. Drivers
+ * using this callback are responsible for calling drm_dev_fini()
+ * to finalize the device and then freeing the struct themselves.
+ */
+ void (*release) (struct drm_device *);
+
int (*set_busid)(struct drm_device *dev, struct drm_master *master);
/**
@@ -119,20 +158,6 @@ struct drm_driver {
void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
/**
- * @device_is_agp:
- *
- * Called by drm_device_is_agp(). Typically used to determine if a card
- * is really attached to AGP or not.
- *
- * Returns:
- *
- * One of three values is returned depending on whether or not the
- * card is absolutely not AGP (return of 0), absolutely is AGP
- * (return of 1), or may or may not be AGP (return of 2).
- */
- int (*device_is_agp) (struct drm_device *dev);
-
- /**
* @get_scanout_position:
*
* Called by vblank timestamping code.
@@ -282,7 +307,7 @@ struct drm_driver {
/**
* @gem_free_object_unlocked: deconstructor for drm_gem_objects
*
- * This is for drivers which are not encumbered with dev->struct_mutex
+ * This is for drivers which are not encumbered with &drm_device.struct_mutex
* legacy locking schemes. Use this hook instead of @gem_free_object.
*/
void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
@@ -327,9 +352,6 @@ struct drm_driver {
int (*gem_prime_mmap)(struct drm_gem_object *obj,
struct vm_area_struct *vma);
- /* vga arb irq handler */
- void (*vgaarb_irq)(struct drm_device *dev, bool state);
-
/**
* @dumb_create:
*
@@ -398,13 +420,20 @@ struct drm_driver {
char *date;
u32 driver_features;
- int dev_priv_size;
const struct drm_ioctl_desc *ioctls;
int num_ioctls;
const struct file_operations *fops;
+ /* Everything below here is for legacy drivers, never use! */
+ /* private: */
+
/* List of devices hanging off this driver with stealth attach. */
struct list_head legacy_dev_list;
+ int (*firstopen) (struct drm_device *);
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int (*dma_quiescent) (struct drm_device *);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int dev_priv_size;
};
extern __printf(6, 7)
@@ -419,6 +448,8 @@ extern unsigned int drm_debug;
int drm_dev_init(struct drm_device *dev,
struct drm_driver *driver,
struct device *parent);
+void drm_dev_fini(struct drm_device *dev);
+
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct device *parent);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
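A hedged sketch of the race-free setup path that the @load/@unload
documentation above recommends, wiring up drm_dev_init(), the new @release
callback and drm_dev_fini(); all foo_* names are illustrative and a
platform device is assumed:

struct foo_device {
	struct drm_device drm;
	/* driver-private state */
};

static void foo_release(struct drm_device *drm)
{
	struct foo_device *foo = container_of(drm, struct foo_device, drm);

	drm_dev_fini(drm);
	kfree(foo);
}

static struct drm_driver foo_drm_driver = {
	.release = foo_release,
	/* .fops, .ioctls, feature flags, ... */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo;
	int ret;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	ret = drm_dev_init(&foo->drm, &foo_drm_driver, &pdev->dev);
	if (ret) {
		kfree(foo);
		return ret;
	}

	/* initialize hardware and modeset state here, then go public */

	ret = drm_dev_register(&foo->drm, 0);
	if (ret)
		drm_dev_unref(&foo->drm);	/* ends up in foo_release() */

	return ret;
}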
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 2705a66b770b..577d5063e63d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -24,6 +24,7 @@
#define __DRM_EDID_H__
#include <linux/types.h>
+#include <linux/hdmi.h>
struct drm_device;
struct i2c_adapter;
@@ -323,8 +324,6 @@ struct cea_sad {
struct drm_encoder;
struct drm_connector;
struct drm_display_mode;
-struct hdmi_avi_infoframe;
-struct hdmi_vendor_infoframe;
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
@@ -347,6 +346,11 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode);
+void
+drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
+ const struct drm_display_mode *mode,
+ enum hdmi_quantization_range rgb_quant_range,
+ bool rgb_quant_range_selectable);
/**
* drm_eld_mnl - Get ELD monitor name length in bytes.
@@ -455,6 +459,8 @@ enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
bool drm_detect_hdmi_monitor(struct edid *edid);
bool drm_detect_monitor_audio(struct edid *edid);
bool drm_rgb_quant_range_selectable(struct edid *edid);
+enum hdmi_quantization_range
+drm_default_rgb_quant_range(const struct drm_display_mode *mode);
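Taken together, the quantization-range additions in this header suggest a
flow along these lines; the foo_* wrapper and passing the EDID explicitly
are assumptions of this sketch:

static int foo_fill_avi_infoframe(struct edid *edid,
				  const struct drm_display_mode *mode,
				  struct hdmi_avi_infoframe *frame)
{
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(frame, mode);
	if (ret)
		return ret;

	drm_hdmi_avi_infoframe_quant_range(frame, mode,
					   drm_default_rgb_quant_range(mode),
					   drm_rgb_quant_range_selectable(edid));
	return 0;
}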
int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
void drm_set_preferred_mode(struct drm_connector *connector,
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index c7438ff0d609..8d8245ec0181 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -25,8 +25,12 @@
#include <linux/list.h>
#include <linux/ctype.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mode.h>
#include <drm/drm_mode_object.h>
+struct drm_encoder;
+
/**
* struct drm_encoder_funcs - encoder controls
*
@@ -71,7 +75,7 @@ struct drm_encoder_funcs {
*
* This optional hook should be used to unregister the additional
* userspace interfaces attached to the encoder from
- * late_unregister(). It is called from drm_dev_unregister(),
+ * @late_register. It is called from drm_dev_unregister(),
* early in the driver unload sequence to disable userspace access
* before data structures are torn down.
*/
@@ -188,9 +192,6 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
return encoder->index;
}
-/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
-static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc);
-
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index 82cdf611393d..1107b4b1c599 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -29,6 +29,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
/**
* struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 3b00f6480b83..a5ecc0a58260 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -16,19 +16,17 @@ struct drm_plane;
struct drm_plane_state;
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int num_crtc,
- unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs);
+ unsigned int preferred_bpp, unsigned int max_conn_count,
+ const struct drm_framebuffer_funcs *funcs);
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int num_crtc,
- unsigned int max_conn_count);
+ unsigned int preferred_bpp, unsigned int max_conn_count);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state);
-int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes,
- const struct drm_framebuffer_funcs *funcs);
+void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
+ int state);
void drm_fb_cma_destroy(struct drm_framebuffer *fb);
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 975deedd593e..6f5acebb266a 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -181,7 +181,7 @@ struct drm_fb_helper_connector {
*
* This is the main structure used by the fbdev helpers. Drivers supporting
* fbdev emulation should embed this into their overall driver structure.
- * Drivers must also fill out a struct &drm_fb_helper_funcs with a few
+ * Drivers must also fill out a &struct drm_fb_helper_funcs with a few
* operations.
*/
struct drm_fb_helper {
@@ -236,8 +236,7 @@ struct drm_fb_helper {
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper, int crtc_count,
- int max_conn);
+ struct drm_fb_helper *helper, int max_conn);
void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
@@ -295,8 +294,7 @@ struct drm_display_mode *
drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
int width, int height);
struct drm_display_mode *
-drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
- int width, int height);
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn);
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
@@ -309,7 +307,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev,
}
static inline int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper, int crtc_count,
+ struct drm_fb_helper *helper,
int max_conn)
{
return 0;
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index d387cf06ae05..21c3d512d25c 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -54,7 +54,7 @@ typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
/**
* struct drm_flip_task - flip work task
* @node: list entry element
- * @data: data to pass to work->func
+ * @data: data to pass to &drm_flip_work.func
*/
struct drm_flip_task {
struct list_head node;
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index a232e7f0c869..dd1e3e99dcff 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -40,8 +40,8 @@ struct drm_framebuffer_funcs {
*
* Clean up framebuffer resources, specifically also unreference the
* backing storage. The core guarantees to call this function for every
- * framebuffer successfully created by ->fb_create() in
- * &drm_mode_config_funcs. Drivers must also call
+ * framebuffer successfully created by calling
+ * &drm_mode_config_funcs.fb_create. Drivers must also call
* drm_framebuffer_cleanup() to release DRM core resources for this
* framebuffer.
*/
@@ -51,7 +51,7 @@ struct drm_framebuffer_funcs {
* @create_handle:
*
* Create a buffer handle in the driver-specific buffer manager (either
- * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
+ * GEM or TTM) valid for the passed-in &struct drm_file. This is used by
* the core to implement the GETFB IOCTL, which returns (for
* sufficiently privileged user) also a native buffer handle. This can
* be used for seamless transitions between modesetting clients by
@@ -112,8 +112,8 @@ struct drm_framebuffer {
*/
struct drm_device *dev;
/**
- * @head: Place on the dev->mode_config.fb_list, access protected by
- * dev->mode_config.fb_lock.
+ * @head: Place on the &drm_mode_config.fb_list, access protected by
+ * &drm_mode_config.fb_lock.
*/
struct list_head head;
@@ -122,6 +122,10 @@ struct drm_framebuffer {
*/
struct drm_mode_object base;
/**
+ * @format: framebuffer format information
+ */
+ const struct drm_format_info *format;
+ /**
* @funcs: framebuffer vfunc table
*/
const struct drm_framebuffer_funcs *funcs;
@@ -145,7 +149,7 @@ struct drm_framebuffer {
*
* This should not be used to specify x/y pixel offsets into the buffer
* data (even for linear buffers). Specifying an x/y pixel offset is
- * instead done through the source rectangle in struct &drm_plane_state.
+ * instead done through the source rectangle in &struct drm_plane_state.
*/
unsigned int offsets[4];
/**
@@ -166,28 +170,11 @@ struct drm_framebuffer {
*/
unsigned int height;
/**
- * @depth: Depth in bits per pixel for RGB formats. 0 for everything
- * else. Legacy information derived from @pixel_format, it's suggested to use
- * the DRM FOURCC codes and helper functions directly instead.
- */
- unsigned int depth;
- /**
- * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for
- * everything else. Legacy information derived from @pixel_format, it's
- * suggested to use the DRM FOURCC codes and helper functions directly
- * instead.
- */
- int bits_per_pixel;
- /**
* @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or
* DRM_MODE_FB_MODIFIERS.
*/
int flags;
/**
- * @pixel_format: DRM FOURCC code describing the pixel format.
- */
- uint32_t pixel_format; /* fourcc format */
- /**
* @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
* IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
* universal plane.
@@ -200,8 +187,7 @@ struct drm_framebuffer {
*/
int hot_y;
/**
- * @filp_head: Placed on struct &drm_file fbs list_head, protected by
- * fbs_lock in the same structure.
+ * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
struct list_head filp_head;
};
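With @depth, @bits_per_pixel and @pixel_format gone, drivers read layout
information from the new @format field instead. A minimal sketch (the
foo_* name is illustrative):

static unsigned int foo_fb_cpp(const struct drm_framebuffer *fb)
{
	/* bytes ("chars") per pixel of the first plane */
	return fb->format->cpp[0];
}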
@@ -273,8 +259,8 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
* @fb: the loop cursor
* @dev: the DRM device
*
- * Iterate over all framebuffers of @dev. User must hold the fb_lock from
- * &drm_mode_config.
+ * Iterate over all framebuffers of @dev. User must hold
+ * &drm_mode_config.fb_lock.
*/
#define drm_for_each_fb(fb, dev) \
for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \
@@ -282,4 +268,10 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
struct drm_framebuffer, head); \
&fb->head != (&(dev)->mode_config.fb_list); \
fb = list_next_entry(fb, head))
+
+int drm_framebuffer_plane_width(int width,
+ const struct drm_framebuffer *fb, int plane);
+int drm_framebuffer_plane_height(int height,
+ const struct drm_framebuffer *fb, int plane);
+
#endif
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 9f63736e6163..449a41b56ffc 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -63,7 +63,7 @@ struct drm_gem_object {
* drops to 0 any global names (e.g. the id in the flink namespace) will
* be cleared.
*
- * Protected by dev->object_name_lock.
+ * Protected by &drm_device.object_name_lock.
*/
unsigned handle_count;
@@ -106,8 +106,8 @@ struct drm_gem_object {
* @name:
*
* Global name for this object, starts at 1. 0 means unnamed.
- * Access is covered by dev->object_name_lock. This is used by the GEM_FLINK
- * and GEM_OPEN ioctls.
+ * Access is covered by &drm_device.object_name_lock. This is used by
+ * the GEM_FLINK and GEM_OPEN ioctls.
*/
int name;
@@ -150,7 +150,7 @@ struct drm_gem_object {
* through importing or exporting). We break the resulting reference
* loop when the last gem handle for this object is released.
*
- * Protected by obj->object_name_lock.
+ * Protected by &drm_device.object_name_lock.
*/
struct dma_buf *dma_buf;
@@ -163,7 +163,7 @@ struct drm_gem_object {
* attachment point for the device. This is invariant over the lifetime
* of a gem object.
*
- * The driver's ->gem_free_object callback is responsible for cleaning
+ * The &drm_driver.gem_free_object callback is responsible for cleaning
* up the dma_buf attachment and references acquired at import time.
*
* Note that the drm gem/prime core does not depend upon drivers setting
@@ -204,7 +204,7 @@ drm_gem_object_reference(struct drm_gem_object *obj)
* @obj: GEM buffer object
*
* This function is meant to be used by drivers which are not encumbered with
- * dev->struct_mutex legacy locking and which are using the
+ * &drm_device.struct_mutex legacy locking and which are using the
* gem_free_object_unlocked callback. It avoids all the locking checks and
* locking overhead of drm_gem_object_unreference() and
* drm_gem_object_unreference_unlocked().
@@ -212,8 +212,8 @@ drm_gem_object_reference(struct drm_gem_object *obj)
* Drivers should never call this directly in their code. Instead they should
* wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object
* *obj)`` wrapper function, and use that. Shared code should never call this, to
- * avoid breaking drivers by accident which still depend upon dev->struct_mutex
- * locking.
+ * avoid breaking drivers by accident which still depend upon
+ * &drm_device.struct_mutex locking.
*/
static inline void
__drm_gem_object_unreference(struct drm_gem_object *obj)
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index acd6af8a8e67..2abcd5190cc1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -53,6 +53,23 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+#ifndef CONFIG_MMU
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#else
+static inline unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ return -EINVAL;
+}
+#endif
+
#ifdef CONFIG_DEBUG_FS
void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
#endif
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
index 293d08caab60..2fb880462a57 100644
--- a/include/drm/drm_irq.h
+++ b/include/drm/drm_irq.h
@@ -51,8 +51,8 @@ struct drm_pending_vblank_event {
*
* Note that for historical reasons - the vblank handling code is still shared
* with legacy/non-kms drivers - this is a free-standing structure not directly
- * connected to struct &drm_crtc. But all public interface functions are taking
- * a struct &drm_crtc to hide this implementation detail.
+ * connected to &struct drm_crtc. But all public interface functions are taking
+ * a &struct drm_crtc to hide this implementation detail.
*/
struct drm_vblank_crtc {
/**
@@ -67,7 +67,7 @@ struct drm_vblank_crtc {
* @disable_timer: Disable timer for the delayed vblank disabling
* hysteresis logic. Vblank disabling is controlled through the
* drm_vblank_offdelay module option and the setting of the
- * max_vblank_count value in the &drm_device structure.
+ * &drm_device.max_vblank_count value.
*/
struct timer_list disable_timer;
@@ -92,7 +92,7 @@ struct drm_vblank_crtc {
*/
atomic_t refcount; /* number of users of vblank interrupts per crtc */
/**
- * @last: Protected by dev->vbl_lock, used for wraparound handling.
+ * @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
*/
u32 last;
/**
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 0b8371795aeb..d81b0ba9921f 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -1,6 +1,7 @@
/**************************************************************************
*
* Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * Copyright 2016 Intel Corporation
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -41,48 +42,129 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
+#include <drm/drm_print.h>
-enum drm_mm_search_flags {
- DRM_MM_SEARCH_DEFAULT = 0,
- DRM_MM_SEARCH_BEST = 1 << 0,
- DRM_MM_SEARCH_BELOW = 1 << 1,
-};
+#ifdef CONFIG_DRM_DEBUG_MM
+#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
+#else
+#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
-enum drm_mm_allocator_flags {
- DRM_MM_CREATE_DEFAULT = 0,
- DRM_MM_CREATE_TOP = 1 << 0,
-};
+/**
+ * enum drm_mm_insert_mode - control search and allocation behaviour
+ *
+ * The &struct drm_mm range manager supports finding a suitable hole using
+ * a number of search trees. These trees are organised by size, by address
+ * and by most recent eviction order. This allows the user to find either
+ * the smallest hole to reuse, the lowest or highest address to reuse, or
+ * simply reuse the most recent eviction that fits. When allocating the
+ * &drm_mm_node from within the hole, the &drm_mm_insert_mode also dictates
+ * whether to allocate the lowest matching address or the highest.
+ */
+enum drm_mm_insert_mode {
+ /**
+ * @DRM_MM_INSERT_BEST:
+ *
+ * Search for the smallest hole (within the search range) that fits
+ * the desired node.
+ *
+ * Allocates the node from the bottom of the found hole.
+ */
+ DRM_MM_INSERT_BEST = 0,
+
+ /**
+ * @DRM_MM_INSERT_LOW:
+ *
+ * Search for the lowest hole (address closest to 0, within the search
+ * range) that fits the desired node.
+ *
+ * Allocates the node from the bottom of the found hole.
+ */
+ DRM_MM_INSERT_LOW,
-#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
-#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+ /**
+ * @DRM_MM_INSERT_HIGH:
+ *
+ * Search for the highest hole (address closest to U64_MAX, within the
+ * search range) that fits the desired node.
+ *
+ * Allocates the node from the *top* of the found hole. The specified
+ * alignment for the node is applied to the base of the node
+ * (&drm_mm_node.start).
+ */
+ DRM_MM_INSERT_HIGH,
+ /**
+ * @DRM_MM_INSERT_EVICT:
+ *
+ * Search for the most recently evicted hole (within the search range)
+ * that fits the desired node. This is appropriate for use immediately
+ * after performing an eviction scan (see drm_mm_scan_init()) and
+ * removing the selected nodes to form a hole.
+ *
+ * Allocates the node from the bottom of the found hole.
+ */
+ DRM_MM_INSERT_EVICT,
+};
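A hedged usage sketch for the modes above: place a 64KiB block in the low
16MiB of the managed range, at the lowest address that fits. "mm" is an
initialized allocator and "node" a zeroed node owned by the caller:

static int foo_alloc_low(struct drm_mm *mm, struct drm_mm_node *node)
{
	return drm_mm_insert_node_in_range(mm, node,
					   SZ_64K, PAGE_SIZE, 0,
					   0, SZ_16M,
					   DRM_MM_INSERT_LOW);
}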
+
+/**
+ * struct drm_mm_node - allocated block in the DRM allocator
+ *
+ * This represents an allocated block in a &drm_mm allocator. Except for
+ * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
+ * entirely opaque and should only be accessed through the provided functions.
+ * Since allocation of these nodes is entirely handled by the driver they can be
+ * embedded.
+ */
struct drm_mm_node {
- struct list_head node_list;
- struct list_head hole_stack;
- struct rb_node rb;
- unsigned hole_follows : 1;
- unsigned scanned_block : 1;
- unsigned scanned_prev_free : 1;
- unsigned scanned_next_free : 1;
- unsigned scanned_preceeds_hole : 1;
- unsigned allocated : 1;
+ /** @color: Opaque driver-private tag. */
unsigned long color;
+ /** @start: Start address of the allocated block. */
u64 start;
+ /** @size: Size of the allocated block. */
u64 size;
- u64 __subtree_last;
+ /* private: */
struct drm_mm *mm;
+ struct list_head node_list;
+ struct list_head hole_stack;
+ struct rb_node rb;
+ struct rb_node rb_hole_size;
+ struct rb_node rb_hole_addr;
+ u64 __subtree_last;
+ u64 hole_size;
+ bool allocated : 1;
+ bool scanned_block : 1;
#ifdef CONFIG_DRM_DEBUG_MM
depot_stack_handle_t stack;
#endif
};
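Typical embedding implied by the comment above; the containing object is
made up for illustration. Allocating the container with kzalloc() leaves
the node cleared, as drm_mm requires before insertion:

struct foo_vma {
	struct drm_mm_node node;	/* managed by drm_mm */
	struct list_head obj_link;	/* driver bookkeeping */
};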
+/**
+ * struct drm_mm - DRM allocator
+ *
+ * DRM range allocator with a few special functions and features geared towards
+ * managing GPU memory. Except for the @color_adjust callback the structure is
+ * entirely opaque and should only be accessed through the provided functions
+ * and macros. This structure can be embedded into larger driver structures.
+ */
struct drm_mm {
+ /**
+ * @color_adjust:
+ *
+ * Optional driver callback to further apply restrictions on a hole. The
+ * node argument points at the node containing the hole from which the
+ * block would be allocated (see drm_mm_hole_follows() and friends). The
+ * other arguments are the color of the block to be allocated and the
+ * start and end of the hole under consideration. The driver can adjust
+ * the start and end as needed to e.g. insert guard pages.
+ */
+ void (*color_adjust)(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start, u64 *end);
+
+ /* private: */
/* List of all memory nodes that immediately precede a free hole. */
struct list_head hole_stack;
/* head_node.node_list is the list of all memory nodes, ordered
@@ -90,33 +172,53 @@ struct drm_mm {
struct drm_mm_node head_node;
/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
struct rb_root interval_tree;
+ struct rb_root holes_size;
+ struct rb_root holes_addr;
- unsigned int scan_check_range : 1;
- unsigned scan_alignment;
- unsigned long scan_color;
- u64 scan_size;
- u64 scan_hit_start;
- u64 scan_hit_end;
- unsigned scanned_blocks;
- u64 scan_start;
- u64 scan_end;
- struct drm_mm_node *prev_scanned_node;
-
- void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
- u64 *start, u64 *end);
+ unsigned long scan_active;
+};
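A minimal sketch of a @color_adjust hook in the spirit of the guard-page
example above. It only pads the start of the hole when the preceding node
carries a different color; a complete implementation would also inspect
the node following the hole:

static void foo_color_adjust(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end)
{
	if (node->color != color)
		*start += PAGE_SIZE;
}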
+
+/**
+ * struct drm_mm_scan - DRM allocator eviction roster data
+ *
+ * This structure tracks data needed for the eviction roster set up using
+ * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
+ * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
+ * be accessed through the provided functions and macros. It is meant to be
+ * allocated temporarily by the driver on the stack.
+ */
+struct drm_mm_scan {
+ /* private: */
+ struct drm_mm *mm;
+
+ u64 size;
+ u64 alignment;
+ u64 remainder_mask;
+
+ u64 range_start;
+ u64 range_end;
+
+ u64 hit_start;
+ u64 hit_end;
+
+ unsigned long color;
+ enum drm_mm_insert_mode mode;
};
/**
* drm_mm_node_allocated - checks whether a node is allocated
* @node: drm_mm_node to check
*
- * Drivers should use this helpers for proper encapusulation of drm_mm
+ * Drivers are required to clear a node prior to using it with the
+ * drm_mm range manager.
+ *
+ * Drivers should use this helper for proper encapsulation of drm_mm
* internals.
*
* Returns:
* True if the @node is allocated.
*/
-static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
return node->allocated;
}
@@ -125,18 +227,38 @@ static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
* drm_mm_initialized - checks whether an allocator is initialized
* @mm: drm_mm to check
*
- * Drivers should use this helpers for proper encapusulation of drm_mm
+ * Drivers should clear the struct drm_mm prior to initialisation if they
+ * want to use this function.
+ *
+ * Drivers should use this helper for proper encapsulation of drm_mm
* internals.
*
* Returns:
* True if the @mm is initialized.
*/
-static inline bool drm_mm_initialized(struct drm_mm *mm)
+static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
return mm->hole_stack.next;
}
-static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+/**
+ * drm_mm_hole_follows - checks whether a hole follows this node
+ * @node: drm_mm_node to check
+ *
+ * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
+ * If you wish to know whether a hole follows this particular node,
+ * query this function. See also drm_mm_hole_node_start() and
+ * drm_mm_hole_node_end().
+ *
+ * Returns:
+ * True if a hole follows the @node.
+ */
+static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
+{
+ return node->hole_size;
+}
+
+static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
return hole_node->start + hole_node->size;
}
@@ -145,20 +267,20 @@ static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
* drm_mm_hole_node_start - computes the start of the hole following @node
* @hole_node: drm_mm_node which implicitly tracks the following hole
*
- * This is useful for driver-sepific debug dumpers. Otherwise drivers should not
- * inspect holes themselves. Drivers must check first whether a hole indeed
- * follows by looking at node->hole_follows.
+ * This is useful for driver-specific debug dumpers. Otherwise drivers should
+ * not inspect holes themselves. Drivers must check first whether a hole indeed
+ * follows by looking at drm_mm_hole_follows().
*
* Returns:
* Start of the subsequent hole.
*/
-static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
- BUG_ON(!hole_node->hole_follows);
+ DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
return __drm_mm_hole_node_start(hole_node);
}
-static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
return list_next_entry(hole_node, node_list)->start;
}
@@ -167,148 +289,162 @@ static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
* drm_mm_hole_node_end - computes the end of the hole following @node
* @hole_node: drm_mm_node which implicitly tracks the following hole
*
- * This is useful for driver-sepific debug dumpers. Otherwise drivers should not
- * inspect holes themselves. Drivers must check first whether a hole indeed
- * follows by looking at node->hole_follows.
+ * This is useful for driver-specific debug dumpers. Otherwise drivers should
+ * not inspect holes themselves. Drivers must check first whether a hole indeed
+ * follows by looking at drm_mm_hole_follows().
*
* Returns:
* End of the subsequent hole.
*/
-static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
return __drm_mm_hole_node_end(hole_node);
}
/**
+ * drm_mm_nodes - list of nodes under the drm_mm range manager
+ * @mm: the struct drm_mm range manager
+ *
+ * As the drm_mm range manager hides its node_list deep within its
+ * structure, extracting it looks painful and repetitive. This is
+ * not expected to be used outside of the drm_mm_for_each_node()
+ * macros and similar internal functions.
+ *
+ * Returns:
+ * The node list, may be empty.
+ */
+#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
+
+/**
* drm_mm_for_each_node - iterator to walk over all allocated nodes
- * @entry: drm_mm_node structure to assign to in each iteration step
- * @mm: drm_mm allocator to walk
+ * @entry: &struct drm_mm_node to assign to in each iteration step
+ * @mm: &drm_mm allocator to walk
*
* This iterator walks over all nodes in the range allocator. It is implemented
- * with list_for_each, so not save against removal of elements.
+ * with list_for_each(), so not safe against removal of elements.
*/
-#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
- &(mm)->head_node.node_list, \
- node_list)
-
-#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
- for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
- &entry->hole_stack != &(mm)->hole_stack ? \
- hole_start = drm_mm_hole_node_start(entry), \
- hole_end = drm_mm_hole_node_end(entry), \
- 1 : 0; \
- entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
+#define drm_mm_for_each_node(entry, mm) \
+ list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
+
+/**
+ * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
+ * @entry: &struct drm_mm_node to assign to in each iteration step
+ * @next: &struct drm_mm_node to store the next step
+ * @mm: &drm_mm allocator to walk
+ *
+ * This iterator walks over all nodes in the range allocator. It is implemented
+ * with list_for_each_safe(), so safe against removal of elements.
+ */
+#define drm_mm_for_each_node_safe(entry, next, mm) \
+ list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
/**
* drm_mm_for_each_hole - iterator to walk over all holes
- * @entry: drm_mm_node used internally to track progress
- * @mm: drm_mm allocator to walk
+ * @pos: &drm_mm_node used internally to track progress
+ * @mm: &drm_mm allocator to walk
* @hole_start: ulong variable to assign the hole start to on each iteration
* @hole_end: ulong variable to assign the hole end to on each iteration
*
* This iterator walks over all holes in the range allocator. It is implemented
- * with list_for_each, so not save against removal of elements. @entry is used
+ * with list_for_each(), so not safe against removal of elements. @pos is used
* internally and will not reflect a real drm_mm_node for the very first hole.
* Hence users of this iterator may not access it.
*
* Implementation Note:
* We need to inline list_for_each_entry in order to be able to set hole_start
* and hole_end on each iteration while keeping the macro sane.
- *
- * The __drm_mm_for_each_hole version is similar, but with added support for
- * going backwards.
*/
-#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
- __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
+#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
+ for (pos = list_first_entry(&(mm)->hole_stack, \
+ typeof(*pos), hole_stack); \
+ &pos->hole_stack != &(mm)->hole_stack ? \
+ hole_start = drm_mm_hole_node_start(pos), \
+ hole_end = hole_start + pos->hole_size, \
+ 1 : 0; \
+ pos = list_next_entry(pos, hole_stack))
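A sketch of the debug-dumper use case mentioned for
drm_mm_hole_node_start()/drm_mm_hole_node_end(): walk and print every
hole. The caller is assumed to hold whatever lock protects the allocator:

static void foo_dump_holes(struct drm_mm *mm)
{
	struct drm_mm_node *pos;
	u64 hole_start, hole_end;

	drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
		pr_debug("hole [%llx, %llx): %llx bytes\n",
			 hole_start, hole_end, hole_end - hole_start);
}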
/*
* Basic range manager support (drm_mm.c)
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
+int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ u64 size,
+ u64 alignment,
+ unsigned long color,
+ u64 start,
+ u64 end,
+ enum drm_mm_insert_mode mode);
-int drm_mm_insert_node_generic(struct drm_mm *mm,
- struct drm_mm_node *node,
- u64 size,
- unsigned alignment,
- unsigned long color,
- enum drm_mm_search_flags sflags,
- enum drm_mm_allocator_flags aflags);
/**
- * drm_mm_insert_node - search for space and insert @node
+ * drm_mm_insert_node_generic - search for space and insert @node
* @mm: drm_mm to allocate from
* @node: preallocate node to insert
* @size: size of the allocation
* @alignment: alignment of the allocation
- * @flags: flags to fine-tune the allocation
+ * @color: opaque tag value to use for this node
+ * @mode: fine-tune the allocation search and placement
*
- * This is a simplified version of drm_mm_insert_node_generic() with @color set
- * to 0.
+ * This is a simplified version of drm_mm_insert_node_in_range() with no
+ * range restrictions applied.
*
* The preallocated node must be cleared to 0.
*
* Returns:
* 0 on success, -ENOSPC if there's no suitable hole.
*/
-static inline int drm_mm_insert_node(struct drm_mm *mm,
- struct drm_mm_node *node,
- u64 size,
- unsigned alignment,
- enum drm_mm_search_flags flags)
+static inline int
+drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ u64 size, u64 alignment,
+ unsigned long color,
+ enum drm_mm_insert_mode mode)
{
- return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
- DRM_MM_CREATE_DEFAULT);
+ return drm_mm_insert_node_in_range(mm, node,
+ size, alignment, color,
+ 0, U64_MAX, mode);
}
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
- struct drm_mm_node *node,
- u64 size,
- unsigned alignment,
- unsigned long color,
- u64 start,
- u64 end,
- enum drm_mm_search_flags sflags,
- enum drm_mm_allocator_flags aflags);
/**
- * drm_mm_insert_node_in_range - ranged search for space and insert @node
+ * drm_mm_insert_node - search for space and insert @node
* @mm: drm_mm to allocate from
* @node: preallocate node to insert
* @size: size of the allocation
- * @alignment: alignment of the allocation
- * @start: start of the allowed range for this node
- * @end: end of the allowed range for this node
- * @flags: flags to fine-tune the allocation
*
- * This is a simplified version of drm_mm_insert_node_in_range_generic() with
- * @color set to 0.
+ * This is a simplified version of drm_mm_insert_node_generic() with @color set
+ * to 0.
*
* The preallocated node must be cleared to 0.
*
* Returns:
* 0 on success, -ENOSPC if there's no suitable hole.
*/
-static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
- struct drm_mm_node *node,
- u64 size,
- unsigned alignment,
- u64 start,
- u64 end,
- enum drm_mm_search_flags flags)
+static inline int drm_mm_insert_node(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ u64 size)
{
- return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
- 0, start, end, flags,
- DRM_MM_CREATE_DEFAULT);
+ return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
}
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-void drm_mm_init(struct drm_mm *mm,
- u64 start,
- u64 size);
+void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
-bool drm_mm_clean(struct drm_mm *mm);
+
+/**
+ * drm_mm_clean - checks whether an allocator is clean
+ * @mm: drm_mm allocator to check
+ *
+ * Returns:
+ * True if the allocator is completely free, false if there's still a node
+ * allocated in it.
+ */
+static inline bool drm_mm_clean(const struct drm_mm *mm)
+{
+ return list_empty(drm_mm_nodes(mm));
+}
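An end-to-end sketch of the basic lifecycle using the simplified helpers
above; sizes are arbitrary example values:

static int foo_mm_demo(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};	/* must be cleared before insertion */
	int ret;

	drm_mm_init(&mm, 0, SZ_1M);	/* manage the range [0, 1MiB) */

	ret = drm_mm_insert_node(&mm, &node, SZ_4K);
	if (ret == 0)
		drm_mm_remove_node(&node);

	WARN_ON(!drm_mm_clean(&mm));	/* must be empty ... */
	drm_mm_takedown(&mm);		/* ... before takedown */
	return ret;
}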
struct drm_mm_node *
-__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
/**
* drm_mm_for_each_node_in_range - iterator to walk over a range of
@@ -329,22 +465,49 @@ __drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
node__ && node__->start < (end__); \
node__ = list_next_entry(node__, node_list))
-void drm_mm_init_scan(struct drm_mm *mm,
- u64 size,
- unsigned alignment,
- unsigned long color);
-void drm_mm_init_scan_with_range(struct drm_mm *mm,
- u64 size,
- unsigned alignment,
- unsigned long color,
- u64 start,
- u64 end);
-bool drm_mm_scan_add_block(struct drm_mm_node *node);
-bool drm_mm_scan_remove_block(struct drm_mm_node *node);
-
-void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
-#ifdef CONFIG_DEBUG_FS
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
-#endif
+void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
+ struct drm_mm *mm,
+ u64 size, u64 alignment, unsigned long color,
+ u64 start, u64 end,
+ enum drm_mm_insert_mode mode);
+
+/**
+ * drm_mm_scan_init - initialize lru scanning
+ * @scan: scan state
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ * @mode: fine-tune the allocation search and placement
+ *
+ * This is a simplified version of drm_mm_scan_init_with_range() with no range
+ * restrictions applied.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
+ struct drm_mm *mm,
+ u64 size,
+ u64 alignment,
+ unsigned long color,
+ enum drm_mm_insert_mode mode)
+{
+ drm_mm_scan_init_with_range(scan, mm,
+ size, alignment, color,
+ 0, U64_MAX, mode);
+}
+
+bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
+ struct drm_mm_node *node);
+bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
+ struct drm_mm_node *node);
+struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
+
+void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);
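A hedged sketch of the scan flow these entry points imply: feed LRU blocks
into the roster until a hole of the right size is found, then back every
block out again in reverse order. The foo_* types, lists and eviction
helper are assumptions of the example:

struct foo_obj {
	struct drm_mm_node node;
	struct list_head lru_link;
	struct list_head scan_link;
};

static void foo_evict_one(struct foo_obj *obj);	/* assumed driver helper */

static int foo_evict_something(struct drm_mm *mm, struct list_head *lru,
			       u64 size)
{
	struct drm_mm_scan scan;
	struct foo_obj *obj, *next;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_BEST);

	list_for_each_entry(obj, lru, lru_link) {
		/* list_add() prepends, so walking scan_list below visits
		 * blocks in reverse insertion order, as
		 * drm_mm_scan_remove_block() requires. */
		list_add(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/* Every scanned block must be backed out; those reporting true
	 * form the hole and must actually be evicted. */
	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
		list_del(&obj->scan_link);
		if (drm_mm_scan_remove_block(&scan, &obj->node) && found)
			list_add(&obj->scan_link, &evict_list);
	}

	if (!found)
		return -ENOSPC;

	list_for_each_entry_safe(obj, next, &evict_list, scan_link)
		foo_evict_one(obj);	/* unbinds and removes obj->node */

	return 0;
}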
#endif
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 137432386310..26ff46ab26fb 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -47,7 +47,7 @@ struct drm_mode_config_funcs {
*
* Create a new framebuffer object. The core does basic checks on the
* requested metadata, but most of that is left to the driver. See
- * struct &drm_mode_fb_cmd2 for details.
+ * &struct drm_mode_fb_cmd2 for details.
*
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
@@ -132,10 +132,10 @@ struct drm_mode_config_funcs {
* that before calling this hook.
*
* See the documentation of @atomic_commit for an exhaustive list of
- * error conditions which don't have to be checked at the
- * ->atomic_check() stage?
+ * error conditions which don't have to be checked in this
+ * callback.
*
- * See the documentation for struct &drm_atomic_state for how exactly
+ * See the documentation for &struct drm_atomic_state for how exactly
* an atomic modeset update is described.
*
* Drivers using the atomic helpers can implement this hook using
@@ -171,7 +171,7 @@ struct drm_mode_config_funcs {
* calling this function, and that nothing has been changed in the
* interim.
*
- * See the documentation for struct &drm_atomic_state for how exactly
+ * See the documentation for &struct drm_atomic_state for how exactly
* an atomic modeset update is described.
*
* Drivers using the atomic helpers can implement this hook using
@@ -198,10 +198,10 @@ struct drm_mode_config_funcs {
* completed. These events are per-CRTC and can be distinguished by the
* CRTC index supplied in &drm_event to userspace.
*
- * The drm core will supply a struct &drm_event in the event
- * member of each CRTC's &drm_crtc_state structure. See the
- * documentation for &drm_crtc_state for more details about the precise
- * semantics of this event.
+ * The drm core will supply a &struct drm_event in each CRTC's
+ * &drm_crtc_state.event. See the documentation for
+ * &drm_crtc_state.event for more details about the precise semantics of
+ * this event.
*
* NOTE:
*
@@ -365,7 +365,13 @@ struct drm_mode_config {
struct list_head fb_list;
/**
- * @num_connector: Number of connectors on this device.
+ * @connector_list_lock: Protects @num_connector and
+ * @connector_list.
+ */
+ spinlock_t connector_list_lock;
+ /**
+ * @num_connector: Number of connectors on this device. Protected by
+ * @connector_list_lock.
*/
int num_connector;
/**
@@ -373,7 +379,9 @@ struct drm_mode_config {
*/
struct ida connector_ida;
/**
- * @connector_list: List of connector objects.
+ * @connector_list: List of connector objects. Protected by
+ * @connector_list_lock. Only use drm_for_each_connector_iter() and
+ * &struct drm_connector_list_iter to walk this list.
*/
struct list_head connector_list;
int num_encoder;
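A sketch of the new connector walk referenced above, assuming the v4.11
iterator entry points drm_connector_list_iter_get()/_put() (they are not
part of this hunk):

static unsigned int foo_count_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	unsigned int count = 0;

	drm_connector_list_iter_get(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		count++;
	drm_connector_list_iter_put(&conn_iter);

	return count;
}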
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 43460b21d112..2c017adf6d74 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -86,10 +86,15 @@ struct drm_object_properties {
*
* Note that atomic drivers do not store mutable properties in this
* array, but only the decoded values in the corresponding state
- * structure. The decoding is done using the ->atomic_get_property and
- * ->atomic_set_property hooks of the corresponding object. Hence atomic
- * drivers should not use drm_object_property_set_value() and
- * drm_object_property_get_value() on mutable objects, i.e. those
+ * structure. The decoding is done using the &drm_crtc_funcs.atomic_get_property
+ * and &drm_crtc_funcs.atomic_set_property hooks for &struct drm_crtc. For
+ * &struct drm_plane the hooks are &drm_plane_funcs.atomic_get_property and
+ * &drm_plane_funcs.atomic_set_property, and for &struct drm_connector
+ * the hooks are &drm_connector_funcs.atomic_get_property and
+ * &drm_connector_funcs.atomic_set_property.
+ *
+ * Hence atomic drivers should not use drm_object_property_set_value()
+ * and drm_object_property_get_value() on mutable objects, i.e. those
* without the DRM_MODE_PROP_IMMUTABLE flag set.
*/
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 9934d91619c1..6dd34280e892 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -459,6 +459,8 @@ int of_get_drm_display_mode(struct device_node *np,
void drm_mode_set_name(struct drm_display_mode *mode);
int drm_mode_hsync(const struct drm_display_mode *mode);
int drm_mode_vrefresh(const struct drm_display_mode *mode);
+void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay);
void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
index b8051d5abe10..cb0ec92e11e6 100644
--- a/include/drm/drm_modeset_helper.h
+++ b/include/drm/drm_modeset_helper.h
@@ -27,7 +27,8 @@
void drm_helper_move_panel_connectors_to_head(struct drm_device *);
-void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
+ struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd);
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 69c3974bf133..091c42205667 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -30,6 +30,7 @@
#define __DRM_MODESET_HELPER_VTABLES_H__
#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
/**
* DOC: overview
@@ -110,9 +111,9 @@ struct drm_crtc_helper_funcs {
* This callback is used to validate a mode. The parameter mode is the
* display mode that userspace requested, adjusted_mode is the mode the
* encoders need to be fed with. Note that this is the inverse semantics
- * of the meaning for the &drm_encoder and &drm_bridge
- * ->mode_fixup() functions. If the CRTC cannot support the requested
- * conversion from mode to adjusted_mode it should reject the modeset.
+ * of the meaning for the &drm_encoder and &drm_bridge_funcs.mode_fixup
+ * vfunc. If the CRTC cannot support the requested conversion from mode
+ * to adjusted_mode it should reject the modeset.
*
* This function is used by both legacy CRTC helpers and atomic helpers.
* With atomic helpers it is optional.
@@ -133,17 +134,18 @@ struct drm_crtc_helper_funcs {
*
* Also beware that neither core nor helpers filter modes before
* passing them to the driver: While the list of modes that is
- * advertised to userspace is filtered using the connector's
- * ->mode_valid() callback, neither the core nor the helpers do any
- * filtering on modes passed in from userspace when setting a mode. It
- * is therefore possible for userspace to pass in a mode that was
- * previously filtered out using ->mode_valid() or add a custom mode
- * that wasn't probed from EDID or similar to begin with. Even though
- * this is an advanced feature and rarely used nowadays, some users rely
- * on being able to specify modes manually so drivers must be prepared
- * to deal with it. Specifically this means that all drivers need not
- * only validate modes in ->mode_valid() but also in ->mode_fixup() to
- * make sure invalid modes passed in from userspace are rejected.
+ * advertised to userspace is filtered using the
+ * &drm_connector_helper_funcs.mode_valid callback, neither the core nor
+ * the helpers do any filtering on modes passed in from userspace when
+ * setting a mode. It is therefore possible for userspace to pass in a
+ * mode that was previously filtered out using
+ * &drm_connector_helper_funcs.mode_valid or add a custom mode that
+ * wasn't probed from EDID or similar to begin with. Even though this is
+ * an advanced feature and rarely used nowadays, some users rely on being
+ * able to specify modes manually so drivers must be prepared to deal
+ * with it. Specifically this means that all drivers need not only
+ * validate modes in &drm_connector_helper_funcs.mode_valid but
+ * also in this or in the &drm_encoder_helper_funcs.mode_fixup callback
+ * to make sure invalid modes passed in from userspace are rejected.
*
* RETURNS:
*
@@ -204,7 +206,7 @@ struct drm_crtc_helper_funcs {
* optimized fast-path instead of a full mode set operation with all the
* resulting flickering. If it is not present
* drm_crtc_helper_set_config() will fall back to a full modeset, using
- * the ->mode_set() callback. Since it can't update other planes it's
+ * the @mode_set callback. Since it can't update other planes it's
* incompatible with atomic modeset support.
*
* This callback is only used by the CRTC helpers and deprecated.
@@ -237,8 +239,7 @@ struct drm_crtc_helper_funcs {
/**
* @load_lut:
*
- * Load a LUT prepared with the @gamma_set functions from
- * &drm_fb_helper_funcs.
+ * Load a LUT prepared with the &drm_fb_helper_funcs.gamma_set vfunc.
*
* This callback is optional and is only used by the fbdev emulation
* helpers.
@@ -256,10 +257,11 @@ struct drm_crtc_helper_funcs {
*
* This callback should be used to disable the CRTC. With the atomic
* drivers it is called after all encoders connected to this CRTC have
- * been shut off already using their own ->disable hook. If that
- * sequence is too simple drivers can just add their own hooks and call
- * it from this CRTC callback here by looping over all encoders
- * connected to it using for_each_encoder_on_crtc().
+ * been shut off already using their own
+ * &drm_encoder_helper_funcs.disable hook. If that sequence is too
+ * simple drivers can just add their own hooks and call it from this
+ * CRTC callback here by looping over all encoders connected to it using
+ * for_each_encoder_on_crtc().
*
* This hook is used both by legacy CRTC helpers and atomic helpers.
* Atomic drivers don't need to implement it if there's no need to
@@ -288,10 +290,10 @@ struct drm_crtc_helper_funcs {
*
* This callback should be used to enable the CRTC. With the atomic
* drivers it is called before all encoders connected to this CRTC are
- * enabled through the encoder's own ->enable hook. If that sequence is
- * too simple drivers can just add their own hooks and call it from this
- * CRTC callback here by looping over all encoders connected to it using
- * for_each_encoder_on_crtc().
+ * enabled through the encoder's own &drm_encoder_helper_funcs.enable
+ * hook. If that sequence is too simple drivers can just add their own
+ * hooks and call it from this CRTC callback here by looping over all
+ * encoders connected to it using for_each_encoder_on_crtc().
*
* This hook is used only by atomic helpers, for symmetry with @disable.
* Atomic drivers don't need to implement it if there's no need to
@@ -315,16 +317,16 @@ struct drm_crtc_helper_funcs {
* beforehand. This is calling order used by the default helper
* implementation in drm_atomic_helper_check().
*
- * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check()
- * hooks are called after the ones for planes, which allows drivers to
- * assign shared resources requested by planes in the CRTC callback
- * here. For more complicated dependencies the driver can call the provided
- * check helpers multiple times until the computed state has a final
- * configuration and everything has been checked.
+ * When using drm_atomic_helper_check_planes() this hook is called
+ * after the &drm_plane_helper_funcs.atomic_check hook for planes, which
+ * allows drivers to assign shared resources requested by planes in this
+ * callback here. For more complicated dependencies the driver can call
+ * the provided check helpers multiple times until the computed state
+ * has a final configuration and everything has been checked.
*
* This function is also allowed to inspect any other object's state and
* can add more state objects to the atomic commit if needed. Care must
- * be taken though to ensure that state check&compute functions for
+ * be taken though to ensure that state check and compute functions for
* these added states are all called, and derived state in other objects
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
@@ -399,10 +401,11 @@ struct drm_crtc_helper_funcs {
*
* This callback should be used to disable the CRTC. With the atomic
* drivers it is called after all encoders connected to this CRTC have
- * been shut off already using their own ->disable hook. If that
- * sequence is too simple drivers can just add their own hooks and call
- * it from this CRTC callback here by looping over all encoders
- * connected to it using for_each_encoder_on_crtc().
+ * been shut off already using their own
+ * &drm_encoder_helper_funcs.disable hook. If that sequence is too
+ * simple drivers can just add their own hooks and call it from this
+ * CRTC callback here by looping over all encoders connected to it using
+ * for_each_encoder_on_crtc().
*
* This hook is used only by atomic helpers. Atomic drivers don't
* need to implement it if there's no need to disable anything at the
@@ -482,16 +485,18 @@ struct drm_encoder_helper_funcs {
* Also beware that neither core nor helpers filter modes before
* passing them to the driver: While the list of modes that is
* advertised to userspace is filtered using the connector's
- * ->mode_valid() callback, neither the core nor the helpers do any
- * filtering on modes passed in from userspace when setting a mode. It
- * is therefore possible for userspace to pass in a mode that was
- * previously filtered out using ->mode_valid() or add a custom mode
- * that wasn't probed from EDID or similar to begin with. Even though
- * this is an advanced feature and rarely used nowadays, some users rely
- * on being able to specify modes manually so drivers must be prepared
- * to deal with it. Specifically this means that all drivers need not
- * only validate modes in ->mode_valid() but also in ->mode_fixup() to
- * make sure invalid modes passed in from userspace are rejected.
+ * &drm_connector_helper_funcs.mode_valid callback, neither the core nor
+ * the helpers do any filtering on modes passed in from userspace when
+ * setting a mode. It is therefore possible for userspace to pass in a
+ * mode that was previously filtered out using
+ * &drm_connector_helper_funcs.mode_valid or add a custom mode that
+ * wasn't probed from EDID or similar to begin with. Even though this
+ * is an advanced feature and rarely used nowadays, some users rely on
+ * being able to specify modes manually so drivers must be prepared to
+ * deal with it. Specifically this means that all drivers need not only
+ * validate modes in &drm_connector_helper_funcs.mode_valid but also in
+ * this or in the &drm_crtc_helper_funcs.mode_fixup callback to make
+ * sure invalid modes passed in from userspace are rejected.
*
* RETURNS:
*
@@ -543,7 +548,7 @@ struct drm_encoder_helper_funcs {
* use this hook, because the helper library calls it only once and not
* every time the display pipeline is suspended using either DPMS or the
* new "ACTIVE" property. Such drivers should instead move all their
- * encoder setup into the ->enable() callback.
+ * encoder setup into the @enable callback.
*
* This callback is used both by the legacy CRTC helpers and the atomic
* modeset helpers. It is optional in the atomic helpers.
@@ -569,7 +574,7 @@ struct drm_encoder_helper_funcs {
* use this hook, because the helper library calls it only once and not
* every time the display pipeline is suspended using either DPMS or the
* new "ACTIVE" property. Such drivers should instead move all their
- * encoder setup into the ->enable() callback.
+ * encoder setup into the @enable callback.
*
* This callback is used by the atomic modeset helpers in place of the
* @mode_set callback, if set by the driver. It is optional and should
@@ -620,10 +625,10 @@ struct drm_encoder_helper_funcs {
*
* This callback should be used to disable the encoder. With the atomic
* drivers it is called before this encoder's CRTC has been shut off
- * using the CRTC's own ->disable hook. If that sequence is too simple
- * drivers can just add their own driver private encoder hooks and call
- * them from CRTC's callback by looping over all encoders connected to
- * it using for_each_encoder_on_crtc().
+ * using its own &drm_crtc_helper_funcs.disable hook. If that
+ * sequence is too simple drivers can just add their own driver private
+ * encoder hooks and call them from CRTC's callback by looping over all
+ * encoders connected to it using for_each_encoder_on_crtc().
*
* This hook is used both by legacy CRTC helpers and atomic helpers.
* Atomic drivers don't need to implement it if there's no need to
@@ -650,10 +655,10 @@ struct drm_encoder_helper_funcs {
*
* This callback should be used to enable the encoder. With the atomic
* drivers it is called after this encoder's CRTC has been enabled using
- * the CRTC's own ->enable hook. If that sequence is too simple drivers
- * can just add their own driver private encoder hooks and call them
- * from CRTC's callback by looping over all encoders connected to it
- * using for_each_encoder_on_crtc().
+ * its own &drm_crtc_helper_funcs.enable hook. If that sequence is
+ * too simple drivers can just add their own driver private encoder
+ * hooks and call them from CRTC's callback by looping over all encoders
+ * connected to it using for_each_encoder_on_crtc().
*
* This hook is used only by atomic helpers, for symmetry with @disable.
* Atomic drivers don't need to implement it if there's no need to
@@ -715,7 +720,7 @@ struct drm_connector_helper_funcs {
* @get_modes:
*
* This function should fill in all modes currently valid for the sink
- * into the connector->probed_modes list. It should also update the
+ * into the &drm_connector.probed_modes list. It should also update the
* EDID property by calling drm_mode_connector_update_edid_property().
*
* The usual way to implement this is to cache the EDID retrieved in the
@@ -724,8 +729,9 @@ struct drm_connector_helper_funcs {
* them by calling drm_add_edid_modes(). But connectors that drive a
* fixed panel can also manually add specific modes using
* drm_mode_probed_add(). Drivers which manually add modes should also
- * make sure that the @display_info, @width_mm and @height_mm fields of the
- * struct &drm_connector are filled in.
+ * make sure that the &drm_connector.display_info,
+ * &drm_connector.width_mm and &drm_connector.height_mm fields are
+ * filled in.
*
* Virtual drivers that just want some standard VESA mode with a given
* resolution can call drm_add_modes_noedid(), and mark the preferred
@@ -734,7 +740,7 @@ struct drm_connector_helper_funcs {
* Finally drivers that support audio probably want to update the ELD
* data, too, using drm_edid_to_eld().
*
- * This function is only called after the ->detect() hook has indicated
+ * This function is only called after the @detect hook has indicated
* that a sink is connected and when the EDID isn't overridden through
* sysfs or the kernel commandline.
*
@@ -767,8 +773,8 @@ struct drm_connector_helper_funcs {
*
* RETURNS:
*
- * Either MODE_OK or one of the failure reasons in enum
- * &drm_mode_status.
+ * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum
+ * drm_mode_status.
*/
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
@@ -874,7 +880,7 @@ struct drm_plane_helper_funcs {
* RETURNS:
*
* 0 on success or one of the following negative error codes allowed by
- * the atomic_commit hook in &drm_mode_config_funcs. When using helpers
+ * the &drm_mode_config_funcs.atomic_commit vfunc. When using helpers
* this callback is the only one which can fail an atomic commit,
* everything else must complete successfully.
*/
@@ -897,7 +903,7 @@ struct drm_plane_helper_funcs {
*
* Drivers should check plane specific constraints in this hook.
*
- * When using drm_atomic_helper_check_planes() plane's ->atomic_check()
+ * When using drm_atomic_helper_check_planes() plane's @atomic_check
* hooks are called before the ones for CRTCs, which allows drivers to
* request shared resources that the CRTC controls here. For more
* complicated dependencies the driver can call the provided check helpers
@@ -906,7 +912,7 @@ struct drm_plane_helper_funcs {
*
* This function is also allowed to inspect any other object's state and
* can add more state objects to the atomic commit if needed. Care must
- * be taken though to ensure that state check&compute functions for
+ * be taken though to ensure that state check and compute functions for
* these added states are all called, and derived state in other objects
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
@@ -935,8 +941,8 @@ struct drm_plane_helper_funcs {
* @atomic_update:
*
* Drivers should use this function to update the plane state. This
- * hook is called in-between the ->atomic_begin() and
- * ->atomic_flush() of &drm_crtc_helper_funcs.
+ * hook is called in-between the &drm_crtc_helper_funcs.atomic_begin and
+ * &drm_crtc_helper_funcs.atomic_flush callbacks.
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
@@ -952,14 +958,15 @@ struct drm_plane_helper_funcs {
* @atomic_disable:
*
* Drivers should use this function to unconditionally disable a plane.
- * This hook is called in-between the ->atomic_begin() and
- * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to
+ * This hook is called in-between the
+ * &drm_crtc_helper_funcs.atomic_begin and
+ * &drm_crtc_helper_funcs.atomic_flush callbacks. It is an alternative to
* @atomic_update, which will be called for disabling planes, too, if
* the @atomic_disable hook isn't implemented.
*
* This hook is also useful to disable planes in preparation of a modeset,
* by calling drm_atomic_helper_disable_planes_on_crtc() from the
- * ->disable() hook in &drm_crtc_helper_funcs.
+ * &drm_crtc_helper_funcs.disable hook.
*
* Note that the power state of the display pipe when this function is
* called depends upon the exact helpers and calling sequence the driver
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index d918ce45ec2c..96d39fbd12ca 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -64,7 +64,7 @@ struct drm_modeset_acquire_ctx {
/**
* struct drm_modeset_lock - used for locking modeset resources.
* @mutex: resource locking
- * @head: used to hold it's place on state->locked list when
+ * @head: used to hold its place on the &drm_atomic_state.locked list when
* part of an atomic update
*
* Used for locking CRTCs and other modeset resources.
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 220d1e2b3db1..4b76cf2d5a7b 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -193,9 +193,9 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
int drm_panel_detach(struct drm_panel *panel);
#ifdef CONFIG_OF
-struct drm_panel *of_drm_find_panel(struct device_node *np);
+struct drm_panel *of_drm_find_panel(const struct device_node *np);
#else
-static inline struct drm_panel *of_drm_find_panel(struct device_node *np)
+static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
return NULL;
}
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index db3bbdeb36d5..20867b4371ab 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -247,19 +247,19 @@ struct drm_plane_funcs {
* @atomic_duplicate_state:
*
* Duplicate the current atomic state for this plane and return it.
- * The core and helpers gurantee that any atomic state duplicated with
+ * The core and helpers guarantee that any atomic state duplicated with
* this hook and still owned by the caller (i.e. not transferred to the
- * driver by calling ->atomic_commit() from struct
- * &drm_mode_config_funcs) will be cleaned up by calling the
- * @atomic_destroy_state hook in this structure.
+ * driver by calling &drm_mode_config_funcs.atomic_commit) will be
+ * cleaned up by calling the @atomic_destroy_state hook in this
+ * structure.
*
- * Atomic drivers which don't subclass struct &drm_plane_state should use
+ * Atomic drivers which don't subclass &struct drm_plane_state should use
* drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
* __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
* duplicated in a consistent fashion across drivers.
*
- * It is an error to call this hook before plane->state has been
+ * It is an error to call this hook before &drm_plane.state has been
* initialized correctly.
*
* NOTE:
@@ -372,7 +372,7 @@ struct drm_plane_funcs {
*
* This optional hook should be used to unregister the additional
* userspace interfaces attached to the plane from
- * late_unregister(). It is called from drm_dev_unregister(),
+ * @late_register. It is called from drm_dev_unregister(),
* early in the driver unload sequence to disable userspace access
* before data structures are torn down.
*/
@@ -381,7 +381,7 @@ struct drm_plane_funcs {
/**
* @atomic_print_state:
*
- * If driver subclasses struct &drm_plane_state, it should implement
+ * If driver subclasses &struct drm_plane_state, it should implement
* this optional hook for printing additional driver specific state.
*
* Do not call this directly, use drm_atomic_plane_print_state()
@@ -423,8 +423,8 @@ enum drm_plane_type {
*
* Primary planes represent a "main" plane for a CRTC. Primary planes
* are the planes operated upon by CRTC modesetting and flipping
- * operations described in the page_flip and set_config hooks in struct
- * &drm_crtc_funcs.
+ * operations described in the &drm_crtc_funcs.page_flip and
+ * &drm_crtc_funcs.set_config hooks.
*/
DRM_PLANE_TYPE_PRIMARY,
@@ -470,9 +470,9 @@ struct drm_plane {
/**
* @mutex:
*
- * Protects modeset plane state, together with the mutex of &drm_crtc
- * this plane is linked to (when active, getting actived or getting
- * disabled).
+ * Protects modeset plane state, together with the &drm_crtc.mutex of
+ * the CRTC this plane is linked to (when active, getting activated or
+ * getting disabled).
*/
struct drm_modeset_lock mutex;
@@ -580,7 +580,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
*
* Iterate over all legacy planes of @dev, excluding primary and cursor planes.
* This is useful for implementing userspace APIs when userspace is not
- * universal plane aware. See also enum &drm_plane_type.
+ * universal plane aware. See also &enum drm_plane_type.
*/
#define drm_for_each_legacy_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 1adf84aea622..7d98763c0444 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -60,26 +60,27 @@
/**
* struct drm_printer - drm output "stream"
- * @printfn: actual output fxn
- * @arg: output fxn specific data
*
* Do not use struct members directly. Use drm_seq_file_printer(),
* drm_info_printer(), etc. to initialize. And drm_printf() for output.
*/
struct drm_printer {
+ /* private: */
void (*printfn)(struct drm_printer *p, struct va_format *vaf);
void *arg;
+ const char *prefix;
};
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
void drm_printf(struct drm_printer *p, const char *f, ...);
/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
- * @f: the struct &seq_file to output to
+ * @f: the &struct seq_file to output to
*
* RETURNS:
* The &drm_printer object
@@ -95,7 +96,7 @@ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
/**
* drm_info_printer - construct a &drm_printer that outputs to dev_printk()
- * @dev: the struct &device pointer
+ * @dev: the &struct device pointer
*
* RETURNS:
* The &drm_printer object
@@ -109,4 +110,19 @@ static inline struct drm_printer drm_info_printer(struct device *dev)
return p;
}
+/**
+ * drm_debug_printer - construct a &drm_printer that outputs to pr_debug()
+ * @prefix: debug output prefix
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_debug_printer(const char *prefix)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_debug,
+ .prefix = prefix
+ };
+ return p;
+}
#endif /* DRM_PRINT_H_ */
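
As a usage sketch of the printer added above (the function name, prefix and
the use of num_fb are illustrative, not part of the patch):

    /* Route a driver state dump through pr_debug(), tagged with a prefix */
    static void example_dump(struct drm_device *dev)
    {
            struct drm_printer p = drm_debug_printer("example");

            drm_printf(&p, "registered framebuffers: %d\n",
                       dev->mode_config.num_fb);
    }
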
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 43c4b6a2046d..f66fdb47551c 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -30,7 +30,7 @@
/**
* struct drm_property_enum - symbolic values for enumerations
* @value: numeric property value for this enum entry
- * @head: list of enum values, linked to enum_list in &drm_property
+ * @head: list of enum values, linked to &drm_property.enum_list
* @name: symbolic name for the enum
*
* For enumeration and bitmask properties this structure stores the symbolic
@@ -191,9 +191,9 @@ struct drm_property {
* struct drm_property_blob - Blob data for &drm_property
* @base: base KMS object
* @dev: DRM device
- * @head_global: entry on the global blob list in &drm_mode_config
- * property_blob_list.
- * @head_file: entry on the per-file blob list in &drm_file blobs list.
+ * @head_global: entry on the global blob list in
+ * &drm_mode_config.property_blob_list.
+ * @head_file: entry on the per-file blob list in &drm_file.blobs list.
* @length: size of the blob in bytes, invariant over the lifetime of the object
* @data: actual data, embedded at the end of this structure
*
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 01a8436ccb0a..fffbb95a0915 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -10,6 +10,10 @@
#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H
#define __LINUX_DRM_SIMPLE_KMS_HELPER_H
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_plane.h>
+
struct drm_simple_display_pipe;
/**
@@ -73,9 +77,9 @@ struct drm_simple_display_pipe_funcs {
/**
* @prepare_fb:
*
- * Optional, called by struct &drm_plane_helper_funcs ->prepare_fb .
- * Please read the documentation for the ->prepare_fb hook in
- * struct &drm_plane_helper_funcs for more details.
+ * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read
+ * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
+ * more details.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
@@ -83,9 +87,9 @@ struct drm_simple_display_pipe_funcs {
/**
* @cleanup_fb:
*
- * Optional, called by struct &drm_plane_helper_funcs ->cleanup_fb .
- * Please read the documentation for the ->cleanup_fb hook in
- * struct &drm_plane_helper_funcs for more details.
+ * Optional, called by &drm_plane_helper_funcs.cleanup_fb. Please read
+ * the documentation for the &drm_plane_helper_funcs.cleanup_fb hook for
+ * more details.
*/
void (*cleanup_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
@@ -114,8 +118,6 @@ struct drm_simple_display_pipe {
int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
struct drm_bridge *bridge);
-void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe);
-
int drm_simple_display_pipe_init(struct drm_device *dev,
struct drm_simple_display_pipe *pipe,
const struct drm_simple_display_pipe_funcs *funcs,
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 0d5f4268d75f..a1dd21d6b723 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -226,23 +226,18 @@
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
-#define INTEL_BDW_RSVDM_IDS(info) \
+#define INTEL_BDW_RSVD_IDS(info) \
INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
- INTEL_VGA_DEVICE(0x163E, info) /* ULX */
-
-#define INTEL_BDW_RSVDD_IDS(info) \
+ INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \
INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_IDS(info) \
INTEL_BDW_GT12_IDS(info), \
INTEL_BDW_GT3_IDS(info), \
- INTEL_BDW_RSVDM_IDS(info), \
- INTEL_BDW_GT12_IDS(info), \
- INTEL_BDW_GT3_IDS(info), \
- INTEL_BDW_RSVDD_IDS(info)
+ INTEL_BDW_RSVD_IDS(info)
#define INTEL_CHV_IDS(info) \
INTEL_VGA_DEVICE(0x22b0, info), \
@@ -270,14 +265,14 @@
INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
- INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
+ INTEL_VGA_DEVICE(0x192B, info) /* Halo GT3 */ \
#define INTEL_SKL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
- INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */
+ INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \
+ INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */
#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
@@ -292,6 +287,10 @@
INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
+#define INTEL_GLK_IDS(info) \
+ INTEL_VGA_DEVICE(0x3184, info), \
+ INTEL_VGA_DEVICE(0x3185, info)
+
#define INTEL_KBL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index f49edecd66a3..b3bf717cfc45 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,8 +3,10 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
- phys_addr_t *mappable_base, u64 *mappable_end);
+void intel_gtt_get(u64 *gtt_total,
+ u32 *stolen_size,
+ phys_addr_t *mappable_base,
+ u64 *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
diff --git a/include/drm/tinydrm/ili9341.h b/include/drm/tinydrm/ili9341.h
new file mode 100644
index 000000000000..807a09f43cad
--- /dev/null
+++ b/include/drm/tinydrm/ili9341.h
@@ -0,0 +1,54 @@
+/*
+ * ILI9341 LCD controller
+ *
+ * Copyright 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_ILI9341_H
+#define __LINUX_ILI9341_H
+
+#define ILI9341_FRMCTR1 0xb1
+#define ILI9341_FRMCTR2 0xb2
+#define ILI9341_FRMCTR3 0xb3
+#define ILI9341_INVTR 0xb4
+#define ILI9341_PRCTR 0xb5
+#define ILI9341_DISCTRL 0xb6
+#define ILI9341_ETMOD 0xb7
+
+#define ILI9341_PWCTRL1 0xc0
+#define ILI9341_PWCTRL2 0xc1
+#define ILI9341_VMCTRL1 0xc5
+#define ILI9341_VMCTRL2 0xc7
+#define ILI9341_PWCTRLA 0xcb
+#define ILI9341_PWCTRLB 0xcf
+
+#define ILI9341_RDID1 0xda
+#define ILI9341_RDID2 0xdb
+#define ILI9341_RDID3 0xdc
+#define ILI9341_RDID4 0xd3
+
+#define ILI9341_PGAMCTRL 0xe0
+#define ILI9341_NGAMCTRL 0xe1
+#define ILI9341_DGAMCTRL1 0xe2
+#define ILI9341_DGAMCTRL2 0xe3
+#define ILI9341_DTCTRLA 0xe8
+#define ILI9341_DTCTRLB 0xea
+#define ILI9341_PWRSEQ 0xed
+
+#define ILI9341_EN3GAM 0xf2
+#define ILI9341_IFCTRL 0xf6
+#define ILI9341_PUMPCTRL 0xf7
+
+#define ILI9341_MADCTL_MH BIT(2)
+#define ILI9341_MADCTL_BGR BIT(3)
+#define ILI9341_MADCTL_ML BIT(4)
+#define ILI9341_MADCTL_MV BIT(5)
+#define ILI9341_MADCTL_MX BIT(6)
+#define ILI9341_MADCTL_MY BIT(7)
+
+#endif /* __LINUX_ILI9341_H */
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
new file mode 100644
index 000000000000..d137b16ee873
--- /dev/null
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -0,0 +1,107 @@
+/*
+ * MIPI Display Bus Interface (DBI) LCD controller support
+ *
+ * Copyright 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MIPI_DBI_H
+#define __LINUX_MIPI_DBI_H
+
+#include <drm/tinydrm/tinydrm.h>
+
+struct spi_device;
+struct gpio_desc;
+struct regulator;
+
+/**
+ * struct mipi_dbi - MIPI DBI controller
+ * @tinydrm: tinydrm base
+ * @spi: SPI device
+ * @enabled: Pipeline is enabled
+ * @cmdlock: Command lock
+ * @command: Bus specific callback executing commands.
+ * @read_commands: Array of read commands terminated by a zero entry.
+ * Reading is disabled if this is NULL.
+ * @dc: Optional D/C gpio.
+ * @tx_buf: Buffer used for transfer (copy clip rect area)
+ * @tx_buf9: Buffer used for Option 1 9-bit conversion
+ * @tx_buf9_len: Size of tx_buf9.
+ * @swap_bytes: Swap bytes in buffer before transfer
+ * @reset: Optional reset gpio
+ * @rotation: initial rotation in degrees counter clockwise
+ * @backlight: backlight device (optional)
+ * @regulator: power regulator (optional)
+ */
+struct mipi_dbi {
+ struct tinydrm_device tinydrm;
+ struct spi_device *spi;
+ bool enabled;
+ struct mutex cmdlock;
+ int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num);
+ const u8 *read_commands;
+ struct gpio_desc *dc;
+ u16 *tx_buf;
+ void *tx_buf9;
+ size_t tx_buf9_len;
+ bool swap_bytes;
+ struct gpio_desc *reset;
+ unsigned int rotation;
+ struct backlight_device *backlight;
+ struct regulator *regulator;
+};
+
+static inline struct mipi_dbi *
+mipi_dbi_from_tinydrm(struct tinydrm_device *tdev)
+{
+ return container_of(tdev, struct mipi_dbi, tinydrm);
+}
+
+int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
+ struct gpio_desc *dc,
+ const struct drm_simple_display_pipe_funcs *pipe_funcs,
+ struct drm_driver *driver,
+ const struct drm_display_mode *mode,
+ unsigned int rotation);
+int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *pipe_funcs,
+ struct drm_driver *driver,
+ const struct drm_display_mode *mode, unsigned int rotation);
+void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state);
+void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
+void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
+bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
+
+int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
+int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
+
+/**
+ * mipi_dbi_command - MIPI DCS command with optional parameter(s)
+ * @mipi: MIPI structure
+ * @cmd: Command
+ * @seq...: Optional parameter(s)
+ *
+ * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for
+ * get/read.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+#define mipi_dbi_command(mipi, cmd, seq...) \
+({ \
+ u8 d[] = { seq }; \
+ mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \
+})
+
+#ifdef CONFIG_DEBUG_FS
+int mipi_dbi_debugfs_init(struct drm_minor *minor);
+#else
+#define mipi_dbi_debugfs_init NULL
+#endif
+
+#endif /* __LINUX_MIPI_DBI_H */
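
A hedged usage sketch combining this macro with the ILI9341 register defines
added earlier; the parameter values are made up for illustration, and mipi
and dev are assumed to be in scope:

    u8 id;
    int ret;

    /* Program frame rate control with illustrative parameters */
    mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);

    /* Reading only works if mipi->read_commands is set */
    ret = mipi_dbi_command_read(mipi, ILI9341_RDID1, &id);
    if (ret)
            dev_err(dev, "RDID1 read failed: %d\n", ret);
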
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
new file mode 100644
index 000000000000..9b9b6cfe3ba5
--- /dev/null
+++ b/include/drm/tinydrm/tinydrm-helpers.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TINYDRM_HELPERS_H
+#define __LINUX_TINYDRM_HELPERS_H
+
+struct backlight_device;
+struct tinydrm_device;
+struct drm_clip_rect;
+struct spi_transfer;
+struct spi_message;
+struct spi_device;
+struct device;
+
+/**
+ * tinydrm_machine_little_endian - Machine is little endian
+ *
+ * Returns:
+ * true if *defined(__LITTLE_ENDIAN)*, false otherwise
+ */
+static inline bool tinydrm_machine_little_endian(void)
+{
+#if defined(__LITTLE_ENDIAN)
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool tinydrm_merge_clips(struct drm_clip_rect *dst,
+ struct drm_clip_rect *src, unsigned int num_clips,
+ unsigned int flags, u32 max_width, u32 max_height);
+void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip);
+void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip);
+void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip, bool swap);
+
+struct backlight_device *tinydrm_of_find_backlight(struct device *dev);
+int tinydrm_enable_backlight(struct backlight_device *backlight);
+int tinydrm_disable_backlight(struct backlight_device *backlight);
+
+size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len);
+bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw);
+int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
+ struct spi_transfer *header, u8 bpw, const void *buf,
+ size_t len);
+void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m);
+
+#ifdef DEBUG
+/**
+ * tinydrm_dbg_spi_message - Dump SPI message
+ * @spi: SPI device
+ * @m: SPI message
+ *
+ * Dumps info about the transfers in a SPI message including buffer content.
+ * DEBUG has to be defined for this function to be enabled alongside setting
+ * the DRM_UT_DRIVER bit of &drm_debug.
+ */
+static inline void tinydrm_dbg_spi_message(struct spi_device *spi,
+ struct spi_message *m)
+{
+ if (drm_debug & DRM_UT_DRIVER)
+ _tinydrm_dbg_spi_message(spi, m);
+}
+#else
+static inline void tinydrm_dbg_spi_message(struct spi_device *spi,
+ struct spi_message *m)
+{
+}
+#endif /* DEBUG */
+
+#endif /* __LINUX_TINYDRM_HELPERS_H */
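
A sketch of how the conversion helpers above might combine in a framebuffer
flush path, assuming a driver whose bus wants big-endian RGB565 and with
tx_buf, vaddr and fb already set up by the caller:

    struct drm_clip_rect clip = {
            .x1 = 0, .y1 = 0,
            .x2 = fb->width, .y2 = fb->height,
    };
    /* Swap bytes when the CPU is little endian but the bus is not */
    bool swap = tinydrm_machine_little_endian();

    tinydrm_xrgb8888_to_rgb565(tx_buf, vaddr, fb, &clip, swap);
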
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
new file mode 100644
index 000000000000..cf9ca207b8b1
--- /dev/null
+++ b/include/drm/tinydrm/tinydrm.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TINYDRM_H
+#define __LINUX_TINYDRM_H
+
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+/**
+ * struct tinydrm_device - tinydrm device
+ * @drm: DRM device
+ * @pipe: Display pipe structure
+ * @dirty_lock: Serializes framebuffer flushing
+ * @fbdev_cma: CMA fbdev structure
+ * @suspend_state: Atomic state when suspended
+ * @fb_funcs: Framebuffer functions used when creating framebuffers
+ */
+struct tinydrm_device {
+ struct drm_device *drm;
+ struct drm_simple_display_pipe pipe;
+ struct mutex dirty_lock;
+ struct drm_fbdev_cma *fbdev_cma;
+ struct drm_atomic_state *suspend_state;
+ const struct drm_framebuffer_funcs *fb_funcs;
+};
+
+static inline struct tinydrm_device *
+pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
+{
+ return container_of(pipe, struct tinydrm_device, pipe);
+}
+
+/**
+ * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations
+ *
+ * This macro provides a shortcut for setting the tinydrm GEM operations in
+ * the &drm_driver structure.
+ */
+#define TINYDRM_GEM_DRIVER_OPS \
+ .gem_free_object = tinydrm_gem_cma_free_object, \
+ .gem_vm_ops = &drm_gem_cma_vm_ops, \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import = drm_gem_prime_import, \
+ .gem_prime_export = drm_gem_prime_export, \
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \
+ .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \
+ .gem_prime_vmap = drm_gem_cma_prime_vmap, \
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \
+ .gem_prime_mmap = drm_gem_cma_prime_mmap, \
+ .dumb_create = drm_gem_cma_dumb_create, \
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset, \
+ .dumb_destroy = drm_gem_dumb_destroy, \
+ .fops = &tinydrm_fops
+
+/**
+ * TINYDRM_MODE - tinydrm display mode
+ * @hd: Horizontal resolution, width
+ * @vd: Vertical resolution, height
+ * @hd_mm: Display width in millimeters
+ * @vd_mm: Display height in millimeters
+ *
+ * This macro creates a &drm_display_mode for use with tinydrm.
+ */
+#define TINYDRM_MODE(hd, vd, hd_mm, vd_mm) \
+ .hdisplay = (hd), \
+ .hsync_start = (hd), \
+ .hsync_end = (hd), \
+ .htotal = (hd), \
+ .vdisplay = (vd), \
+ .vsync_start = (vd), \
+ .vsync_end = (vd), \
+ .vtotal = (vd), \
+ .width_mm = (hd_mm), \
+ .height_mm = (vd_mm), \
+ .type = DRM_MODE_TYPE_DRIVER, \
+ .clock = 1 /* pass validation */
+
+extern const struct file_operations tinydrm_fops;
+void tinydrm_lastclose(struct drm_device *drm);
+void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
+struct drm_gem_object *
+tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
+ const struct drm_framebuffer_funcs *fb_funcs,
+ struct drm_driver *driver);
+int devm_tinydrm_register(struct tinydrm_device *tdev);
+void tinydrm_shutdown(struct tinydrm_device *tdev);
+int tinydrm_suspend(struct tinydrm_device *tdev);
+int tinydrm_resume(struct tinydrm_device *tdev);
+
+void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state);
+int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+int
+tinydrm_display_pipe_init(struct tinydrm_device *tdev,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ int connector_type,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation);
+
+#endif /* __LINUX_TINYDRM_H */
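
A sketch of a panel driver combining the two macros above; the "example"
names are placeholders:

    static const struct drm_display_mode example_mode = {
            TINYDRM_MODE(320, 240, 58, 43),
    };

    static struct drm_driver example_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET |
                               DRIVER_PRIME | DRIVER_ATOMIC,
            TINYDRM_GEM_DRIVER_OPS,
            .name = "example",
    };
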
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 9a465314572c..8f619f499e55 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -215,6 +215,8 @@ struct ttm_buffer_object {
struct drm_vma_offset_node vma_node;
+ unsigned priority;
+
/**
* Special members that are protected by the reserve lock
* and the bo::lock when written to. Can be read with
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index feecf33a1212..8145773c582c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,6 +42,8 @@
#include <linux/spinlock.h>
#include <linux/reservation.h>
+#define TTM_MAX_BO_PRIORITY 16U
+
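
With the LRU lists below turned into per-priority arrays, a full walk over a
memory type's buffers becomes a loop over all priority levels, roughly as in
this sketch (examine() is a placeholder, and the &ttm_buffer_object.lru list
head is assumed):

    unsigned int i;
    struct ttm_buffer_object *bo;

    for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
            list_for_each_entry(bo, &man->lru[i], lru)
                    examine(bo); /* lowest priority index walked first */
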
struct ttm_backend_func {
/**
* struct ttm_backend_func member bind
@@ -298,7 +300,7 @@ struct ttm_mem_type_manager {
* Protected by the global->lru_lock.
*/
- struct list_head lru;
+ struct list_head lru[TTM_MAX_BO_PRIORITY];
/*
* Protected by @move_lock.
@@ -431,9 +433,15 @@ struct ttm_bo_driver {
int (*verify_access)(struct ttm_buffer_object *bo,
struct file *filp);
- /* hook to notify driver about a driver move so it
- * can do tiling things */
+ /**
+ * Hook to notify driver about a driver move so it
+ * can do tiling things and book-keeping.
+ *
+ * @evict: whether this move is evicting the buffer from the graphics
+ * address space
+ */
void (*move_notify)(struct ttm_buffer_object *bo,
+ bool evict,
struct ttm_mem_reg *new_mem);
/* notify the driver we are taking a fault on this BO
* and have reserved it */
@@ -454,18 +462,6 @@ struct ttm_bo_driver {
struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
-
- /**
- * Optional driver callback for when BO is removed from the LRU.
- * Called with LRU lock held immediately before the removal.
- */
- void (*lru_removal)(struct ttm_buffer_object *bo);
-
- /**
- * Return the list_head after which a BO should be inserted in the LRU.
- */
- struct list_head *(*lru_tail)(struct ttm_buffer_object *bo);
- struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo);
};
/**
@@ -512,7 +508,7 @@ struct ttm_bo_global {
/**
* Protected by the lru_lock.
*/
- struct list_head swap_lru;
+ struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
/**
* Internal protection.
@@ -780,9 +776,6 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo);
-struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo);
-
/**
* __ttm_bo_reserve:
*
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8daeb3ce0016..bfb3704fc6fc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,23 +39,6 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @attach: [optional] allows different devices to 'attach' themselves to the
- * given buffer. It might return -EBUSY to signal that backing storage
- * is already allocated and incompatible with the requirements
- * of requesting device.
- * @detach: [optional] detach a given device from this buffer.
- * @map_dma_buf: returns list of scatter pages allocated, increases usecount
- * of the buffer. Requires atleast one attach to be called
- * before. Returned sg list should already be mapped into
- * _device_ address space. This call may sleep. May also return
- * -EINTR. Should return -EINVAL if attach hasn't been called yet.
- * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
- * pages.
- * @release: release this buffer; to be called after the last dma_buf_put.
- * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
- * caches and allocate backing storage (if not yet done)
- * respectively pin the object into memory.
- * @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
* This callback must not sleep.
@@ -63,43 +46,206 @@ struct dma_buf_attachment;
* This callback must not sleep.
* @kmap: maps a page from the buffer into kernel address space.
* @kunmap: [optional] unmaps a page from the buffer.
- * @mmap: used to expose the backing storage to userspace. Note that the
- * mapping needs to be coherent - if the exporter doesn't directly
- * support this, it needs to fake coherency by shooting down any ptes
- * when transitioning away from the cpu domain.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
*/
struct dma_buf_ops {
+ /**
+ * @attach:
+ *
+ * This is called from dma_buf_attach() to make sure that a given
+ * &device can access the provided &dma_buf. Exporters which support
+ * buffer objects in special locations like VRAM or device-specific
+ * carveout areas should check whether the buffer could be move to
+ * system memory (or directly accessed by the provided device), and
+ * otherwise need to fail the attach operation.
+ *
+ * The exporter should also in general check whether the current
+ * allocation fulfills the DMA constraints of the new device. If this
+ * is not the case, and the allocation cannot be moved, it should also
+ * fail the attach operation.
+ *
+ * Any exporter-private housekeeping data can be stored in the
+ * &dma_buf_attachment.priv pointer.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure. It might return -EBUSY
+ * to signal that backing storage is already allocated and incompatible
+ * with the requirements of requesting device.
+ */
int (*attach)(struct dma_buf *, struct device *,
- struct dma_buf_attachment *);
+ struct dma_buf_attachment *);
+ /**
+ * @detach:
+ *
+ * This is called by dma_buf_detach() to release a &dma_buf_attachment.
+ * Provided so that exporters can clean up any housekeeping for an
+ * &dma_buf_attachment.
+ *
+ * This callback is optional.
+ */
void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
- /* For {map,unmap}_dma_buf below, any specific buffer attributes
- * required should get added to device_dma_parameters accessible
- * via dev->dma_params.
+ /**
+ * @map_dma_buf:
+ *
+ * This is called by dma_buf_map_attachment() and is used to map a
+ * shared &dma_buf into device address space, and it is mandatory. It
+ * can only be called if @attach has been called successfully. This
+ * essentially pins the DMA buffer into place, and it cannot be moved
+ * any more.
+ *
+ * This call may sleep, e.g. when the backing storage first needs to be
+ * allocated, or moved to a location suitable for all currently attached
+ * devices.
+ *
+ * Note that any specific buffer attributes required for this function
+ * should get added to device_dma_parameters accessible via
+ * &device.dma_parms from the &dma_buf_attachment. The @attach callback
+ * should also check these constraints.
+ *
+ * If this is being called for the first time, the exporter can now
+ * choose to scan through the list of attachments for this buffer,
+ * collate the requirements of the attached devices, and choose an
+ * appropriate backing storage for the buffer.
+ *
+ * Based on enum dma_data_direction, it might be possible to have
+ * multiple users accessing at the same time (for reading, maybe), or
+ * any other kind of sharing that the exporter might wish to make
+ * available to buffer-users.
+ *
+ * Returns:
+ *
+ * A &sg_table scatter list of the backing storage of the DMA buffer,
+ * already mapped into the device address space of the &device attached
+ * with the provided &dma_buf_attachment.
+ *
+ * On failure, returns a negative error value wrapped into a pointer.
+ * May also return -EINTR when a signal was received while being
+ * blocked.
*/
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
- enum dma_data_direction);
+ enum dma_data_direction);
+ /**
+ * @unmap_dma_buf:
+ *
+ * This is called by dma_buf_unmap_attachment() and should unmap and
+ * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
+ * It should also unpin the backing storage if this is the last mapping
+ * of the DMA buffer, if the exporter supports backing storage
+ * migration.
+ */
void (*unmap_dma_buf)(struct dma_buf_attachment *,
- struct sg_table *,
- enum dma_data_direction);
+ struct sg_table *,
+ enum dma_data_direction);
+
/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
* if the call would block.
*/
- /* after final dma_buf_put() */
+ /**
+ * @release:
+ *
+ * Called after the last dma_buf_put to release the &dma_buf, and
+ * mandatory.
+ */
void (*release)(struct dma_buf *);
+ /**
+ * @begin_cpu_access:
+ *
+ * This is called from dma_buf_begin_cpu_access() and allows the
+ * exporter to ensure that the memory is actually available for cpu
+ * access - the exporter might need to allocate or swap-in and pin the
+ * backing storage. The exporter also needs to ensure that cpu access is
+ * coherent for the access direction. The direction can be used by the
+ * exporter to optimize the cache flushing, i.e. access with a different
+ * direction (read instead of write) might return stale or even bogus
+ * data (e.g. when the exporter needs to copy the data to temporary
+ * storage).
+ *
+ * This callback is optional.
+ *
+ * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
+ * from userspace (where storage shouldn't be pinned to avoid handing
+ * de-facto mlock rights to userspace) and for the kernel-internal
+ * users of the various kmap interfaces, where the backing storage must
+ * be pinned to guarantee that the atomic kmap calls can succeed. Since
+ * there's no in-kernel users of the kmap interfaces yet this isn't a
+ * real problem.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. This can for
+ * example fail when the backing storage can't be allocated. Can also
+ * return -ERESTARTSYS or -EINTR when the call has been interrupted and
+ * needs to be restarted.
+ */
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+
+ /**
+ * @end_cpu_access:
+ *
+ * This is called from dma_buf_end_cpu_access() when the importer is
+ * done accessing the CPU. The exporter can use this to flush caches and
+ * unpin any resources pinned in @begin_cpu_access.
+ * The result of any dma_buf kmap calls after end_cpu_access is
+ * undefined.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. Can return
+ * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
+ * to be restarted.
+ */
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
void (*kunmap)(struct dma_buf *, unsigned long, void *);
+ /**
+ * @mmap:
+ *
+ * This callback is used by the dma_buf_mmap() function
+ *
+ * Note that the mapping needs to be incoherent; userspace is expected
+ * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
+ *
+ * Because dma-buf buffers have invariant size over their lifetime, the
+ * dma-buf core checks whether a vma is too large and rejects such
+ * mappings. The exporter hence does not need to duplicate this check.
+ *
+ * If an exporter needs to manually flush caches and hence needs to fake
+ * coherency for mmap support, it needs to be able to zap all the ptes
+ * pointing at the backing storage. Now linux mm needs a struct
+ * address_space associated with the struct file stored in vma->vm_file
+ * to do that with the function unmap_mapping_range. But the dma_buf
+ * framework only backs every dma_buf fd with the anon_file struct file,
+ * i.e. all dma_bufs share the same file.
+ *
+ * Hence exporters need to setup their own file (and address_space)
+ * association by setting vma->vm_file and adjusting vma->vm_pgoff in
+ * the dma_buf mmap callback. In the specific case of a gem driver the
+ * exporter could use the shmem file already provided by gem (and set
+ * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
+ * corresponding range of the struct address_space associated with their
+ * own file.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure.
+ */
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
void *(*vmap)(struct dma_buf *);
@@ -124,6 +270,15 @@ struct dma_buf_ops {
* @poll: for userspace poll support
* @cb_excl: for userspace poll support
* @cb_shared: for userspace poll support
+ *
+ * This represents a shared buffer, created by calling dma_buf_export(). The
+ * userspace representation is a normal file descriptor, which can be created by
+ * calling dma_buf_fd().
+ *
+ * Shared dma buffers are reference counted using dma_buf_put() and
+ * get_dma_buf().
+ *
+ * Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
size_t size;
@@ -160,6 +315,11 @@ struct dma_buf {
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
* attached to the buffer.
+ *
+ * An attachment is created by calling dma_buf_attach(), and released again by
+ * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
+ * transfer is created by dma_buf_map_attachment() and freed again by calling
+ * dma_buf_unmap_attachment().
*/
struct dma_buf_attachment {
struct dma_buf *dmabuf;
@@ -192,9 +352,11 @@ struct dma_buf_export_info {
};
/**
- * helper macro for exporters; zeros and fills in most common values
- *
+ * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name
+ *
+ * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
+ * zeroes it out and pre-populates exp_name in it.
*/
#define DEFINE_DMA_BUF_EXPORT_INFO(name) \
struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
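
To make the attach/map lifecycle described above concrete, an importer's
happy path is roughly the following sketch (ERR_PTR/NULL checks trimmed;
dmabuf and dev are assumed to be in scope):

    struct dma_buf_attachment *attach;
    struct sg_table *sgt;

    attach = dma_buf_attach(dmabuf, dev);           /* calls @attach */
    sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

    /* ... program the device using the returned sg list ... */

    dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
    dma_buf_detach(dmabuf, attach);                 /* calls @detach */
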
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index d51a7d23c358..6048fa404e57 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -47,7 +47,7 @@ struct dma_fence_cb;
* can be compared to decide which fence would be signaled later.
* @flags: A mask of DMA_FENCE_FLAG_* defined below
* @timestamp: Timestamp when the fence was signaled.
- * @status: Optional, only valid if < 0, must be set before calling
+ * @error: Optional, only valid if < 0, must be set before calling
* dma_fence_signal, indicates that the fence has completed with an error.
*
* the flags member must be manipulated and read using the appropriate
@@ -79,7 +79,7 @@ struct dma_fence {
unsigned seqno;
unsigned long flags;
ktime_t timestamp;
- int status;
+ int error;
};
enum dma_fence_flag_bits {
@@ -133,7 +133,7 @@ struct dma_fence_cb {
* or some failure occurred that made it impossible to enable
* signaling. True indicates successful enabling.
*
- * fence->status may be set in enable_signaling, but only when false is
+ * fence->error may be set in enable_signaling, but only when false is
* returned.
*
* Calling dma_fence_signal before enable_signaling is called allows
@@ -145,7 +145,7 @@ struct dma_fence_cb {
* the second time will be a noop since it was already signaled.
*
* Notes on signaled:
- * May set fence->status if returning true.
+ * May set fence->error if returning true.
*
* Notes on wait:
* Must not be NULL, set to dma_fence_default_wait for default implementation.
@@ -378,6 +378,50 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
return dma_fence_is_signaled(f2) ? NULL : f2;
}
+/**
+ * dma_fence_get_status_locked - returns the status upon completion
+ * @fence: [in] the dma_fence to query
+ *
+ * Drivers can supply an optional error status condition before they signal
+ * the fence (to indicate whether the fence was completed due to an error
+ * rather than success). The value of the status condition is only valid
+ * if the fence has been signaled, so dma_fence_get_status_locked() first
+ * checks the signal state before reporting the error status.
+ *
+ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
+ * been signaled without an error condition, or a negative error code
+ * if the fence has been completed with an error.
+ */
+static inline int dma_fence_get_status_locked(struct dma_fence *fence)
+{
+ if (dma_fence_is_signaled_locked(fence))
+ return fence->error ?: 1;
+ else
+ return 0;
+}
+
+int dma_fence_get_status(struct dma_fence *fence);
+
+/**
+ * dma_fence_set_error - flag an error condition on the fence
+ * @fence: [in] the dma_fence
+ * @error: [in] the error to store
+ *
+ * Drivers can supply an optional error status condition before they signal
+ * the fence, to indicate that the fence was completed due to an error
+ * rather than success. This must be set before signaling (so that the value
+ * is visible before any waiters on the signal callback are woken). This
+ * helper exists to help catch erroneous setting of &dma_fence.error.
+ */
+static inline void dma_fence_set_error(struct dma_fence *fence,
+ int error)
+{
+ BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+ BUG_ON(error >= 0 || error < -MAX_ERRNO);
+
+ fence->error = error;
+}
+
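
Taken together, a driver's completion path might look roughly like this
sketch (job_failed and the -EIO value are illustrative):

    /* The error must be set before signaling, so waiters observe it */
    if (job_failed)
            dma_fence_set_error(fence, -EIO);
    dma_fence_signal(fence);

    /* 0: not signaled, 1: success, <0: errno from dma_fence_set_error */
    status = dma_fence_get_status(fence);
    if (status < 0)
            pr_err("fence completed with error %d\n", status);
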
signed long dma_fence_wait_timeout(struct dma_fence *,
bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
diff --git a/include/linux/prime_numbers.h b/include/linux/prime_numbers.h
new file mode 100644
index 000000000000..14ec4f567342
--- /dev/null
+++ b/include/linux/prime_numbers.h
@@ -0,0 +1,37 @@
+#ifndef __LINUX_PRIME_NUMBERS_H
+#define __LINUX_PRIME_NUMBERS_H
+
+#include <linux/types.h>
+
+bool is_prime_number(unsigned long x);
+unsigned long next_prime_number(unsigned long x);
+
+/**
+ * for_each_prime_number - iterate over each prime up to a value
+ * @prime: the current prime number in this iteration
+ * @max: the upper limit
+ *
+ * Starting from the first prime number 2 iterate over each prime number up to
+ * the @max value. On each iteration, @prime is set to the current prime number.
+ * @max should be less than ULONG_MAX to ensure termination. To begin the
+ * iteration with @prime set to 1, use for_each_prime_number_from()
+ * instead.
+ */
+#define for_each_prime_number(prime, max) \
+ for_each_prime_number_from((prime), 2, (max))
+
+/**
+ * for_each_prime_number_from - iterate over each prime up to a value
+ * @prime: the current prime number in this iteration
+ * @from: the initial value
+ * @max: the upper limit
+ *
+ * Starting from @from iterate over each successive prime number up to the
+ * @max value. On each iteration, @prime is set to the current prime number.
+ * @max should be less than ULONG_MAX, and @from less than @max, to ensure
+ * termination.
+ */
+#define for_each_prime_number_from(prime, from, max) \
+ for (prime = (from); prime <= (max); prime = next_prime_number(prime))
+
+#endif /* !__LINUX_PRIME_NUMBERS_H */
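
A minimal usage sketch:

    unsigned long p;

    /* Prints the primes 2, 3, 5, 7, 11, 13, 17, 19 */
    for_each_prime_number(p, 20)
            pr_info("%lu\n", p);
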
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index d9706a6f5ae2..2b5a4679daea 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -145,6 +145,40 @@ reservation_object_get_list(struct reservation_object *obj)
}
/**
+ * reservation_object_lock - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object for exclusive access and modification. Note
+ * that the lock is only against other writers; readers will run concurrently
+ * with a writer under RCU. The seqlock is used to notify readers if they
+ * overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ */
+static inline int
+reservation_object_lock(struct reservation_object *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock(&obj->lock, ctx);
+}
+
+/**
+ * reservation_object_unlock - unlock the reservation object
+ * @obj: the reservation object
+ *
+ * Unlocks the reservation object following exclusive access.
+ */
+static inline void
+reservation_object_unlock(struct reservation_object *obj)
+{
+ ww_mutex_unlock(&obj->lock);
+}
+
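
For a single object outside of a multi-object acquire sequence, usage
reduces to roughly this sketch:

    int ret;

    ret = reservation_object_lock(obj, NULL);
    if (ret)
            return ret;
    /* ... install or replace fences on obj ... */
    reservation_object_unlock(obj);
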
+/**
* reservation_object_get_excl - get the reservation object's
* exclusive fence, with update-side lock held
* @obj: the reservation object
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 9355dd8eff3b..c97addd08f8c 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -9,6 +9,7 @@ header-y += i810_drm.h
header-y += i915_drm.h
header-y += mga_drm.h
header-y += nouveau_drm.h
+header-y += omap_drm.h
header-y += qxl_drm.h
header-y += r128_drm.h
header-y += radeon_drm.h
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 396183628f3c..5797283c2d79 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -528,6 +528,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
+/* Query UVD handles */
+#define AMDGPU_INFO_NUM_HANDLES 0x1C
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -719,6 +721,13 @@ struct drm_amdgpu_info_hw_ip {
__u32 _pad;
};
+struct drm_amdgpu_info_num_handles {
+ /** Max handles as supported by firmware for UVD */
+ __u32 uvd_max_handles;
+ /** Handles currently in use for UVD */
+ __u32 uvd_used_handles;
+};
+
#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
struct drm_amdgpu_info_vce_clock_table_entry {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index a5890bf44c0a..ef20abb8119b 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -41,10 +41,17 @@ extern "C" {
/* 8 bpp Red */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+/* 16 bpp Red */
+#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
+
/* 16 bpp RG */
#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+/* 32 bpp RG */
+#define DRM_FORMAT_RG1616 fourcc_code('R', 'G', '3', '2') /* [31:0] R:G 16:16 little endian */
+#define DRM_FORMAT_GR1616 fourcc_code('G', 'R', '3', '2') /* [31:0] G:R 16:16 little endian */
+
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
@@ -154,11 +161,13 @@ extern "C" {
/* Vendor Ids: */
#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
#define DRM_FORMAT_MOD_VENDOR_NV 0x03
#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
+#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
/* add more to the end as needed */
#define fourcc_mod_code(vendor, val) \
@@ -172,6 +181,16 @@ extern "C" {
* authoritative source for all of these.
*/
+/*
+ * Linear Layout
+ *
+ * Just plain linear layout. Note that this is different from not specifying any
+ * modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl),
+ * which tells the driver to also take driver-internal information into account
+ * and so might actually result in a tiled framebuffer.
+ */
+#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+
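A hedged userspace sketch of requesting an explicitly linear buffer via DRM_IOCTL_MODE_ADDFB2 (illustrative; fd, bo_handle, width, height and stride are assumptions, not defined by this patch):

	struct drm_mode_fb_cmd2 fb = {
		.width = width,
		.height = height,
		.pixel_format = DRM_FORMAT_XRGB8888,
		.flags = DRM_MODE_FB_MODIFIERS, /* honour modifier[] below */
		.handles = { bo_handle },
		.pitches = { stride },
		.modifier = { DRM_FORMAT_MOD_LINEAR },
	};

	if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &fb) == 0)
		use_framebuffer(fb.fb_id); /* use_framebuffer() is hypothetical */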
/* Intel framebuffer modifiers */
/*
@@ -233,6 +252,46 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+/* Vivante framebuffer modifiers */
+
+/*
+ * Vivante 4x4 tiling layout
+ *
+ * This is a simple tiled layout using tiles of 4x4 pixels in a row-major
+ * layout.
+ */
+#define DRM_FORMAT_MOD_VIVANTE_TILED fourcc_mod_code(VIVANTE, 1)
+
+/*
+ * Vivante 64x64 super-tiling layout
+ *
+ * This is a tiled layout using 64x64 pixel super-tiles, where each super-tile
+ * contains 8x4 groups of 2x4 tiles of 4x4 pixels (as above), all in
+ * row-major layout.
+ *
+ * For more information: see
+ * https://github.com/etnaviv/etna_viv/blob/master/doc/hardware.md#texture-tiling
+ */
+#define DRM_FORMAT_MOD_VIVANTE_SUPER_TILED fourcc_mod_code(VIVANTE, 2)
+
+/*
+ * Vivante 4x4 tiling layout for dual-pipe
+ *
+ * Same as the 4x4 tiling layout, except every second 4x4 pixel tile starts at a
+ * different base address. Offsets from the base addresses are therefore halved
+ * compared to the non-split tiled layout.
+ */
+#define DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED fourcc_mod_code(VIVANTE, 3)
+
+/*
+ * Vivante 64x64 super-tiling layout for dual-pipe
+ *
+ * Same as the 64x64 super-tiling layout, except every second 4x4 pixel tile
+ * starts at a different base address. Offsets from the base addresses are
+ * therefore halved compared to the non-split super-tiled layout.
+ */
+#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 1c12a350eca3..57093b455db6 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -258,6 +258,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_USERPTR 0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
+#define DRM_I915_PERF_OPEN 0x36
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -311,6 +312,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -393,6 +395,7 @@ typedef struct drm_i915_irq_wait {
* priorities and the driver will attempt to execute batches in priority order.
*/
#define I915_PARAM_HAS_SCHEDULER 41
+#define I915_PARAM_HUC_STATUS 42
typedef struct drm_i915_getparam {
__s32 param;
@@ -1224,9 +1227,142 @@ struct drm_i915_gem_context_param {
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
+#define I915_CONTEXT_PARAM_BANNABLE 0x5
__u64 value;
};
+enum drm_i915_oa_format {
+ I915_OA_FORMAT_A13 = 1,
+ I915_OA_FORMAT_A29,
+ I915_OA_FORMAT_A13_B8_C8,
+ I915_OA_FORMAT_B4_C8,
+ I915_OA_FORMAT_A45_B8_C8,
+ I915_OA_FORMAT_B4_C8_A16,
+ I915_OA_FORMAT_C4_B8,
+
+ I915_OA_FORMAT_MAX /* non-ABI */
+};
+
+enum drm_i915_perf_property_id {
+ /**
+ * Open the stream for a specific context handle (as used with
+ * execbuffer2). A stream opened for a specific context this way
+ * won't typically require root privileges.
+ */
+ DRM_I915_PERF_PROP_CTX_HANDLE = 1,
+
+ /**
+ * A value of 1 requests the inclusion of raw OA unit reports as
+ * part of stream samples.
+ */
+ DRM_I915_PERF_PROP_SAMPLE_OA,
+
+ /**
+ * The value specifies which set of OA unit metrics should be
+ * configured, defining the contents of any OA unit reports.
+ */
+ DRM_I915_PERF_PROP_OA_METRICS_SET,
+
+ /**
+ * The value specifies the size and layout of OA unit reports.
+ */
+ DRM_I915_PERF_PROP_OA_FORMAT,
+
+ /**
+ * Specifying this property implicitly requests periodic OA unit
+ * sampling and (at least on Haswell) the sampling frequency is derived
+ * from this exponent as follows:
+ *
+ * 80ns * 2^(period_exponent + 1)
+ */
+ DRM_I915_PERF_PROP_OA_EXPONENT,
+
+ DRM_I915_PERF_PROP_MAX /* non-ABI */
+};
+
+struct drm_i915_perf_open_param {
+ __u32 flags;
+#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
+#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
+#define I915_PERF_FLAG_DISABLED (1<<2)
+
+ /** The number of u64 (id, value) pairs */
+ __u32 num_properties;
+
+ /**
+ * Pointer to array of u64 (id, value) pairs configuring the stream
+ * to open.
+ */
+ __u64 properties_ptr;
+};
+
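A hedged userspace sketch of opening a stream with these properties (illustrative; drm_fd and metrics_set_id are assumptions, and <stdint.h>/<sys/ioctl.h> are taken as included):

	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_DISABLED,
		/* num_properties counts (id, value) pairs, not u64s */
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};
	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);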
+/**
+ * Enable data capture for a stream that was either opened in a disabled state
+ * via I915_PERF_FLAG_DISABLED or was later disabled via
+ * I915_PERF_IOCTL_DISABLE.
+ *
+ * Disabling and re-enabling a stream is intended to be cheaper than
+ * closing and re-opening a stream with the same configuration.
+ *
+ * It's undefined whether any pending data for the stream will be lost.
+ */
+#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
+
+/**
+ * Disable data capture for a stream.
+ *
+ * It is an error to try to read a stream that is disabled.
+ */
+#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
+
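Continuing the sketch above, a stream opened with I915_PERF_FLAG_DISABLED might be driven like this (illustrative; stream_fd as above):

	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
	/* ... read() samples from stream_fd ... */
	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);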
+/**
+ * Common to all i915 perf records
+ */
+struct drm_i915_perf_record_header {
+ __u32 type;
+ __u16 pad;
+ __u16 size;
+};
+
+enum drm_i915_perf_record_type {
+
+ /**
+ * Samples are the workhorse record type whose contents are extensible
+ * and defined when opening an i915 perf stream based on the given
+ * properties.
+ *
+ * Boolean properties following the naming convention
+ * DRM_I915_PERF_PROP_SAMPLE_xyz request the inclusion of 'xyz' data in
+ * every sample.
+ *
+ * The order in which these sample properties are given by userspace
+ * has no effect on the ordering of data within a sample; the
+ * ordering is documented here.
+ *
+ * struct {
+ * struct drm_i915_perf_record_header header;
+ *
+ * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
+ * };
+ */
+ DRM_I915_PERF_RECORD_SAMPLE = 1,
+
+ /*
+ * Indicates that one or more OA reports were not written by the
+ * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
+ * command collides with periodic sampling - which would be more likely
+ * at higher sampling frequencies.
+ */
+ DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
+
+ /**
+ * An error occurred that resulted in all pending OA reports being lost.
+ */
+ DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
+
+ DRM_I915_PERF_RECORD_MAX /* non-ABI */
+};
+
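A hedged sketch of walking the records returned by a read() on the stream (buf and len are assumptions; it relies on header->size spanning the whole record, header included):

	uint8_t *p = buf;

	while (p < buf + len) {
		struct drm_i915_perf_record_header *header = (void *)p;

		if (header->type == DRM_I915_PERF_RECORD_SAMPLE) {
			/* with SAMPLE_OA requested, the raw OA report
			 * immediately follows the header */
		} else if (header->type == DRM_I915_PERF_RECORD_OA_REPORT_LOST) {
			/* one or more OA reports were dropped */
		}

		p += header->size;
	}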
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index a2e90722a4c4..1c80efb67d10 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -110,6 +110,7 @@ header-y += dlm_netlink.h
header-y += dlm_plock.h
header-y += dm-ioctl.h
header-y += dm-log-userspace.h
+header-y += dma-buf.h
header-y += dn.h
header-y += dqblk_xfs.h
header-y += edd.h
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index 0098a522d9f4..ef8e2a8ad0af 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -89,6 +89,8 @@
#define VIDCON0_ENVID_F (1 << 0)
/* VIDOUTCON0 */
+#define VIDOUT_INTERLACE_FIELD_F (1 << 29)
+#define VIDOUT_INTERLACE_EN_F (1 << 28)
#define VIDOUT_LCD_ON (1 << 24)
#define VIDOUT_IF_F_MASK (0x3 << 20)
#define VIDOUT_RGB_IF (0x0 << 20)
diff --git a/lib/Kconfig b/lib/Kconfig
index f3552604e47a..87ecd41031bd 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -553,4 +553,7 @@ config SBITMAP
config PARMAN
tristate
+config PRIME_NUMBERS
+ tristate
+
endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 6b768b58a38d..f1a0364af377 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -198,6 +198,8 @@ obj-$(CONFIG_ASN1) += asn1_decoder.o
obj-$(CONFIG_FONT_SUPPORT) += fonts/
+obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/prime_numbers.c b/lib/prime_numbers.c
new file mode 100644
index 000000000000..550eec457c2e
--- /dev/null
+++ b/lib/prime_numbers.c
@@ -0,0 +1,315 @@
+#define pr_fmt(fmt) "prime numbers: " fmt "\n"
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/prime_numbers.h>
+#include <linux/slab.h>
+
+#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
+
+struct primes {
+ struct rcu_head rcu;
+ unsigned long last, sz;
+ unsigned long primes[];
+};
+
+#if BITS_PER_LONG == 64
+static const struct primes small_primes = {
+ .last = 61,
+ .sz = 64,
+ .primes = {
+ BIT(2) |
+ BIT(3) |
+ BIT(5) |
+ BIT(7) |
+ BIT(11) |
+ BIT(13) |
+ BIT(17) |
+ BIT(19) |
+ BIT(23) |
+ BIT(29) |
+ BIT(31) |
+ BIT(37) |
+ BIT(41) |
+ BIT(43) |
+ BIT(47) |
+ BIT(53) |
+ BIT(59) |
+ BIT(61)
+ }
+};
+#elif BITS_PER_LONG == 32
+static const struct primes small_primes = {
+ .last = 31,
+ .sz = 32,
+ .primes = {
+ BIT(2) |
+ BIT(3) |
+ BIT(5) |
+ BIT(7) |
+ BIT(11) |
+ BIT(13) |
+ BIT(17) |
+ BIT(19) |
+ BIT(23) |
+ BIT(29) |
+ BIT(31)
+ }
+};
+#else
+#error "unhandled BITS_PER_LONG"
+#endif
+
+static DEFINE_MUTEX(lock);
+static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
+
+static unsigned long selftest_max;
+
+static bool slow_is_prime_number(unsigned long x)
+{
+ unsigned long y = int_sqrt(x);
+
+ while (y > 1) {
+ if ((x % y) == 0)
+ break;
+ y--;
+ }
+
+ return y == 1;
+}
+
+static unsigned long slow_next_prime_number(unsigned long x)
+{
+ while (x < ULONG_MAX && !slow_is_prime_number(++x))
+ ;
+
+ return x;
+}
+
+static unsigned long clear_multiples(unsigned long x,
+ unsigned long *p,
+ unsigned long start,
+ unsigned long end)
+{
+ unsigned long m;
+
+ m = 2 * x;
+ if (m < start)
+ m = roundup(start, x);
+
+ while (m < end) {
+ __clear_bit(m, p);
+ m += x;
+ }
+
+ return x;
+}
+
+static bool expand_to_next_prime(unsigned long x)
+{
+ const struct primes *p;
+ struct primes *new;
+ unsigned long sz, y;
+
+ /* Bertrand's Postulate (or Chebyshev's theorem) states that if n > 3,
+ * there is always at least one prime p between n and 2n - 2.
+ * Equivalently, if n > 1, then there is always at least one prime p
+ * such that n < p < 2n.
+ *
+ * http://mathworld.wolfram.com/BertrandsPostulate.html
+ * https://en.wikipedia.org/wiki/Bertrand's_postulate
+ */
+ sz = 2 * x;
+ if (sz < x)
+ return false;
+
+ sz = round_up(sz, BITS_PER_LONG);
+ new = kmalloc(sizeof(*new) + bitmap_size(sz),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!new)
+ return false;
+
+ mutex_lock(&lock);
+ p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+ if (x < p->last) {
+ kfree(new);
+ goto unlock;
+ }
+
+ /* Where memory permits, track the primes using the
+ * Sieve of Eratosthenes: remove all multiples of known primes from
+ * the set; what remains in the set is therefore prime.
+ */
+ bitmap_fill(new->primes, sz);
+ bitmap_copy(new->primes, p->primes, p->sz);
+ for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))
+ new->last = clear_multiples(y, new->primes, p->sz, sz);
+ new->sz = sz;
+
+ BUG_ON(new->last <= x);
+
+ rcu_assign_pointer(primes, new);
+ if (p != &small_primes)
+ kfree_rcu((struct primes *)p, rcu);
+
+unlock:
+ mutex_unlock(&lock);
+ return true;
+}
+
+static void free_primes(void)
+{
+ const struct primes *p;
+
+ mutex_lock(&lock);
+ p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+ if (p != &small_primes) {
+ rcu_assign_pointer(primes, &small_primes);
+ kfree_rcu((struct primes *)p, rcu);
+ }
+ mutex_unlock(&lock);
+}
+
+/**
+ * next_prime_number - return the next prime number
+ * @x: the starting point for searching to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. The set of prime numbers is computed using the Sieve of
+ * Eratosthenes (on finding a prime, all multiples of that prime are removed
+ * from the set) enabling a fast lookup of the next prime number larger than
+ * @x. If the sieve fails (memory limitation), the search falls back to using
+ * slow trial-division, up to the value of ULONG_MAX (which is reported as the
+ * final prime as a sentinel).
+ *
+ * Returns: the next prime number larger than @x
+ */
+unsigned long next_prime_number(unsigned long x)
+{
+ const struct primes *p;
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ while (x >= p->last) {
+ rcu_read_unlock();
+
+ if (!expand_to_next_prime(x))
+ return slow_next_prime_number(x);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ }
+ x = find_next_bit(p->primes, p->last, x + 1);
+ rcu_read_unlock();
+
+ return x;
+}
+EXPORT_SYMBOL(next_prime_number);
+
+/**
+ * is_prime_number - test whether the given number is prime
+ * @x: the number to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. Internally a cache of prime numbers is kept (to speed up
+ * searching for sequential primes, see next_prime_number()), but if the number
+ * falls outside of that cache, its primality is tested using trial-division.
+ *
+ * Returns: true if @x is prime, false for composite numbers.
+ */
+bool is_prime_number(unsigned long x)
+{
+ const struct primes *p;
+ bool result;
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ while (x >= p->sz) {
+ rcu_read_unlock();
+
+ if (!expand_to_next_prime(x))
+ return slow_is_prime_number(x);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ }
+ result = test_bit(x, p->primes);
+ rcu_read_unlock();
+
+ return result;
+}
+EXPORT_SYMBOL(is_prime_number);
+
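A small illustrative pairing of the two exported helpers, e.g. rounding a bucket count up to a prime:

	unsigned long buckets = 1024;

	if (!is_prime_number(buckets))
		buckets = next_prime_number(buckets); /* 1031 */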
+static void dump_primes(void)
+{
+ const struct primes *p;
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+
+ if (buf)
+ bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
+ pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+ p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
+
+ rcu_read_unlock();
+
+ kfree(buf);
+}
+
+static int selftest(unsigned long max)
+{
+ unsigned long x, last;
+
+ if (!max)
+ return 0;
+
+ for (last = 0, x = 2; x < max; x++) {
+ bool slow = slow_is_prime_number(x);
+ bool fast = is_prime_number(x);
+
+ if (slow != fast) {
+ pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
+ x, slow ? "yes" : "no", fast ? "yes" : "no");
+ goto err;
+ }
+
+ if (!slow)
+ continue;
+
+ if (next_prime_number(last) != x) {
+ pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
+ last, x, next_prime_number(last));
+ goto err;
+ }
+ last = x;
+ }
+
+ pr_info("selftest(%lu) passed, last prime was %lu", x, last);
+ return 0;
+
+err:
+ dump_primes();
+ return -EINVAL;
+}
+
+static int __init primes_init(void)
+{
+ return selftest(selftest_max);
+}
+
+static void __exit primes_exit(void)
+{
+ free_primes();
+}
+
+module_init(primes_init);
+module_exit(primes_exit);
+
+module_param_named(selftest, selftest_max, ulong, 0400);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9913be8532ab..8fd745cb3f36 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -311,9 +311,15 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
}
EXPORT_SYMBOL_GPL(snd_hda_get_conn_index);
-
-/* return DEVLIST_LEN parameter of the given widget */
-static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid)
+/**
+ * snd_hda_get_num_devices - get DEVLIST_LEN parameter of the given widget
+ * @codec: the HDA codec
+ * @nid: NID of the pin to parse
+ *
+ * Get the number of device entries on the given widget. This is a
+ * feature of DP MST audio. Each pin can have several device entries
+ * in it.
+ */
+unsigned int snd_hda_get_num_devices(struct hda_codec *codec, hda_nid_t nid)
{
unsigned int wcaps = get_wcaps(codec, nid);
unsigned int parm;
@@ -327,6 +333,7 @@ static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid)
parm = 0;
return parm & AC_DEV_LIST_LEN_MASK;
}
+EXPORT_SYMBOL_GPL(snd_hda_get_num_devices);
/**
* snd_hda_get_devices - copy device list without cache
@@ -344,7 +351,7 @@ int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
unsigned int parm;
int i, dev_len, devices;
- parm = get_num_devices(codec, nid);
+ parm = snd_hda_get_num_devices(codec, nid);
if (!parm) /* not multi-stream capable */
return 0;
@@ -368,6 +375,63 @@ int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
return devices;
}
+/**
+ * snd_hda_get_dev_select - get device entry select on the pin
+ * @codec: the HDA codec
+ * @nid: NID of the pin to get device entry select
+ *
+ * Get the device entry select on the pin. Returns the device entry
+ * id selected on the pin. A return value of 0 means the first device
+ * entry is selected or MST is not supported.
+ */
+int snd_hda_get_dev_select(struct hda_codec *codec, hda_nid_t nid)
+{
+ /* codecs that don't support dp_mst always return 0, i.e. the first dev_entry */
+ if (!codec->dp_mst)
+ return 0;
+
+ return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_DEVICE_SEL, 0);
+}
+EXPORT_SYMBOL_GPL(snd_hda_get_dev_select);
+
+/**
+ * snd_hda_set_dev_select - set device entry select on the pin
+ * @codec: the HDA codec
+ * @nid: NID of the pin to set device entry select
+ * @dev_id: device entry id to be set
+ *
+ * Set the device entry select on the pin nid.
+ */
+int snd_hda_set_dev_select(struct hda_codec *codec, hda_nid_t nid, int dev_id)
+{
+ int ret, num_devices;
+
+ /* codecs that don't support dp_mst always return 0, i.e. the first dev_entry */
+ if (!codec->dp_mst)
+ return 0;
+
+ /* AC_PAR_DEVLIST_LEN is 0 based. */
+ num_devices = snd_hda_get_num_devices(codec, nid) + 1;
+ /* If the Device List Length is 0 (num_devices == 1),
+ * the pin is not multi-stream capable.
+ * Do nothing in this case.
+ */
+ if (num_devices == 1)
+ return 0;
+
+ /* The behavior of setting an index equal to or greater than the
+ * Device List Length is unpredictable.
+ */
+ if (num_devices <= dev_id)
+ return -EINVAL;
+
+ ret = snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_DEVICE_SEL, dev_id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_hda_set_dev_select);
+
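A hedged sketch of how a caller might enumerate the device entries on a DP MST capable pin with the helpers above (codec and pin_nid are assumptions), mirroring the save/restore pattern used in patch_hdmi.c below:

	int i, saved = snd_hda_get_dev_select(codec, pin_nid);
	int dev_num = snd_hda_get_num_devices(codec, pin_nid) + 1;

	for (i = 0; i < dev_num; i++) {
		if (snd_hda_set_dev_select(codec, pin_nid, i) < 0)
			break;
		/* ... query presence/ELD for device entry i ... */
	}
	snd_hda_set_dev_select(codec, pin_nid, saved);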
/*
* read widget caps for each widget and store in cache
*/
@@ -403,6 +467,10 @@ static int read_pin_defaults(struct hda_codec *codec)
pin->nid = nid;
pin->cfg = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_CONFIG_DEFAULT, 0);
+ /*
+ * All device entries share the same widget control so far.
+ * FIXME: if any codec behaves differently, this needs fixing here.
+ */
pin->ctrl = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL,
0);
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 373fcad840ea..f17f25245e52 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -347,8 +347,11 @@ int snd_hda_override_conn_list(struct hda_codec *codec, hda_nid_t nid, int nums,
const hda_nid_t *list);
int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
hda_nid_t nid, int recursive);
+unsigned int snd_hda_get_num_devices(struct hda_codec *codec, hda_nid_t nid);
int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
u8 *dev_list, int max_devices);
+int snd_hda_get_dev_select(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_set_dev_select(struct hda_codec *codec, hda_nid_t nid, int dev_id);
struct hda_verb {
hda_nid_t nid;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3fc201c3b95a..fd5efa72a68b 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -76,6 +76,7 @@ struct hdmi_spec_per_cvt {
struct hdmi_spec_per_pin {
hda_nid_t pin_nid;
+ int dev_id;
/* pin idx, different device entries on the same pin use the same idx */
int pin_nid_idx;
int num_mux_nids;
@@ -130,7 +131,23 @@ struct hdmi_spec {
struct snd_array cvts; /* struct hdmi_spec_per_cvt */
hda_nid_t cvt_nids[4]; /* only for haswell fix */
+ /*
+ * num_pins is the number of virtual pins.
+ * For example, if there are 3 pins and each pin
+ * has 4 device entries, then num_pins is 12.
+ */
int num_pins;
+ /*
+ * num_nids is the number of real pins.
+ * In the above example, num_nids is 3.
+ */
+ int num_nids;
+ /*
+ * dev_num is the number of device entries
+ * on each pin.
+ * In the above example, dev_num is 4.
+ */
+ int dev_num;
struct snd_array pins; /* struct hdmi_spec_per_pin */
struct hdmi_pcm pcm_rec[16];
struct mutex pcm_lock;
@@ -217,14 +234,26 @@ union audio_infoframe {
/* obtain hda_pcm object assigned to idx */
#define get_pcm_rec(spec, idx) (get_hdmi_pcm(spec, idx)->pcm)
-static int pin_nid_to_pin_index(struct hda_codec *codec, hda_nid_t pin_nid)
+static int pin_id_to_pin_index(struct hda_codec *codec,
+ hda_nid_t pin_nid, int dev_id)
{
struct hdmi_spec *spec = codec->spec;
int pin_idx;
+ struct hdmi_spec_per_pin *per_pin;
+
+ /*
+ * (dev_id == -1) means it is a non-MST pin;
+ * return the first virtual pin on this port.
+ */
+ if (dev_id == -1)
+ dev_id = 0;
- for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++)
- if (get_pin(spec, pin_idx)->pin_nid == pin_nid)
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ per_pin = get_pin(spec, pin_idx);
+ if ((per_pin->pin_nid == pin_nid) &&
+ (per_pin->dev_id == dev_id))
return pin_idx;
+ }
codec_warn(codec, "HDMI: pin nid %d not registered\n", pin_nid);
return -EINVAL;
@@ -724,10 +753,11 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
-static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
+static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id)
{
struct hdmi_spec *spec = codec->spec;
- int pin_idx = pin_nid_to_pin_index(codec, nid);
+ int pin_idx = pin_id_to_pin_index(codec, nid, dev_id);
if (pin_idx < 0)
return;
@@ -738,7 +768,8 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
static void jack_callback(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
- check_presence_and_report(codec, jack->nid);
+ /* hda_jack doesn't support DP MST */
+ check_presence_and_report(codec, jack->nid, 0);
}
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -747,6 +778,12 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
struct hda_jack_tbl *jack;
int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
+ /*
+ * Assume DP MST uses dyn_pcm_assign and acomp and
+ * never comes here.
+ * If DP MST ever supports unsol events, the code below
+ * needs to consider dev_entry.
+ */
jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
if (!jack)
return;
@@ -757,7 +794,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
!!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
- check_presence_and_report(codec, jack->nid);
+ /* hda_jack doesn't support DP MST */
+ check_presence_and_report(codec, jack->nid, 0);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -970,28 +1008,60 @@ static int intel_cvt_id_to_mux_idx(struct hdmi_spec *spec,
* by any other pins.
*/
static void intel_not_share_assigned_cvt(struct hda_codec *codec,
- hda_nid_t pin_nid, int mux_idx)
+ hda_nid_t pin_nid,
+ int dev_id, int mux_idx)
{
struct hdmi_spec *spec = codec->spec;
hda_nid_t nid;
int cvt_idx, curr;
struct hdmi_spec_per_cvt *per_cvt;
+ struct hdmi_spec_per_pin *per_pin;
+ int pin_idx;
+
+ /* configure the pins connections */
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ int dev_id_saved;
+ int dev_num;
- /* configure all pins, including "no physical connection" ones */
- for_each_hda_codec_node(nid, codec) {
- unsigned int wid_caps = get_wcaps(codec, nid);
- unsigned int wid_type = get_wcaps_type(wid_caps);
+ per_pin = get_pin(spec, pin_idx);
+ /*
+ * The pin is not connected to a monitor,
+ * so there is no need to operate on it.
+ */
+ if (!per_pin->pcm)
+ continue;
- if (wid_type != AC_WID_PIN)
+ if ((per_pin->pin_nid == pin_nid) &&
+ (per_pin->dev_id == dev_id))
continue;
- if (nid == pin_nid)
+ /*
+ * if per_pin->dev_id >= dev_num,
+ * snd_hda_set_dev_select() will fail,
+ * and the following operation is unpredictable.
+ * So skip this situation.
+ */
+ dev_num = snd_hda_get_num_devices(codec, per_pin->pin_nid) + 1;
+ if (per_pin->dev_id >= dev_num)
continue;
+ nid = per_pin->pin_nid;
+
+ /*
+ * Calling this function should not affect
+ * the device entry selection, so save the
+ * dev id for each pin and restore it on return.
+ */
+ dev_id_saved = snd_hda_get_dev_select(codec, nid);
+ snd_hda_set_dev_select(codec, nid, per_pin->dev_id);
curr = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_CONNECT_SEL, 0);
- if (curr != mux_idx)
+ if (curr != mux_idx) {
+ snd_hda_set_dev_select(codec, nid, dev_id_saved);
continue;
+ }
+
/* choose an unassigned converter. The conveters in the
* connection list are in the same order as in the codec.
@@ -1008,12 +1078,13 @@ static void intel_not_share_assigned_cvt(struct hda_codec *codec,
break;
}
}
+ snd_hda_set_dev_select(codec, nid, dev_id_saved);
}
}
/* A wrapper of intel_not_share_assigned_cvt() */
static void intel_not_share_assigned_cvt_nid(struct hda_codec *codec,
- hda_nid_t pin_nid, hda_nid_t cvt_nid)
+ hda_nid_t pin_nid, int dev_id, hda_nid_t cvt_nid)
{
int mux_idx;
struct hdmi_spec *spec = codec->spec;
@@ -1025,7 +1096,7 @@ static void intel_not_share_assigned_cvt_nid(struct hda_codec *codec,
*/
mux_idx = intel_cvt_id_to_mux_idx(spec, cvt_nid);
if (mux_idx >= 0)
- intel_not_share_assigned_cvt(codec, pin_nid, mux_idx);
+ intel_not_share_assigned_cvt(codec, pin_nid, dev_id, mux_idx);
}
/* skeleton caller of pin_cvt_fixup ops */
@@ -1140,6 +1211,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
per_pin->cvt_nid = per_cvt->cvt_nid;
hinfo->nid = per_cvt->cvt_nid;
+ snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id);
snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
AC_VERB_SET_CONNECT_SEL,
per_pin->mux_idx);
@@ -1198,6 +1270,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
return -EINVAL;
}
+ /* all the device entries on the same pin have the same conn list */
per_pin->num_mux_nids = snd_hda_get_connections(codec, pin_nid,
per_pin->mux_nids,
HDA_MAX_CONNECTIONS);
@@ -1215,13 +1288,13 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
return per_pin->pin_nid_idx;
/* have a second try; check the "reserved area" over num_pins */
- for (i = spec->num_pins; i < spec->pcm_used; i++) {
+ for (i = spec->num_nids; i < spec->pcm_used; i++) {
if (!test_bit(i, &spec->pcm_bitmap))
return i;
}
/* the last try; check the empty slots in pins */
- for (i = 0; i < spec->num_pins; i++) {
+ for (i = 0; i < spec->num_nids; i++) {
if (!test_bit(i, &spec->pcm_bitmap))
return i;
}
@@ -1296,10 +1369,13 @@ static void hdmi_pcm_setup_pin(struct hdmi_spec *spec,
per_pin->cvt_nid = hinfo->nid;
mux_idx = hdmi_get_pin_cvt_mux(spec, per_pin, hinfo->nid);
- if (mux_idx < per_pin->num_mux_nids)
+ if (mux_idx < per_pin->num_mux_nids) {
+ snd_hda_set_dev_select(codec, per_pin->pin_nid,
+ per_pin->dev_id);
snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
AC_VERB_SET_CONNECT_SEL,
mux_idx);
+ }
snd_hda_spdif_ctls_assign(codec, per_pin->pcm_idx, hinfo->nid);
non_pcm = check_non_pcm_per_cvt(codec, hinfo->nid);
@@ -1467,6 +1543,11 @@ static struct snd_jack *pin_idx_to_jack(struct hda_codec *codec,
if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign)
jack = spec->pcm_rec[per_pin->pcm_idx].jack;
else if (!spec->dyn_pcm_assign) {
+ /*
+ * jack tbl doesn't support DP MST
+ * DP MST will use dyn_pcm_assign,
+ * so DP MST will never come here
+ */
jack_tbl = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
if (jack_tbl)
jack = jack_tbl->jack;
@@ -1485,9 +1566,9 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
mutex_lock(&per_pin->lock);
eld->monitor_present = false;
- size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid, -1,
- &eld->monitor_present, eld->eld_buffer,
- ELD_MAX_SIZE);
+ size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
+ per_pin->dev_id, &eld->monitor_present,
+ eld->eld_buffer, ELD_MAX_SIZE);
if (size > 0) {
size = min(size, ELD_MAX_SIZE);
if (snd_hdmi_parse_eld(codec, &eld->info,
@@ -1565,38 +1646,81 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
int pin_idx;
struct hdmi_spec_per_pin *per_pin;
int err;
+ int dev_num, i;
caps = snd_hda_query_pin_caps(codec, pin_nid);
if (!(caps & (AC_PINCAP_HDMI | AC_PINCAP_DP)))
return 0;
+ /*
+ * For DP MST audio, Configuration Default is the same for
+ * all device entries on the same pin
+ */
config = snd_hda_codec_get_pincfg(codec, pin_nid);
if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
return 0;
- if (is_haswell_plus(codec))
- intel_haswell_fixup_connect_list(codec, pin_nid);
-
- pin_idx = spec->num_pins;
- per_pin = snd_array_new(&spec->pins);
- if (!per_pin)
- return -ENOMEM;
-
- per_pin->pin_nid = pin_nid;
- per_pin->non_pcm = false;
- if (spec->dyn_pcm_assign)
- per_pin->pcm_idx = -1;
- else {
- per_pin->pcm = get_hdmi_pcm(spec, pin_idx);
- per_pin->pcm_idx = pin_idx;
+ /*
+ * To simplify the implementation, allocate all
+ * the virtual pins statically at initialization.
+ */
+ if (is_haswell_plus(codec)) {
+ /*
+ * On Intel platforms, the number of device entries
+ * changes dynamically: it is 3 when a DP MST hub is
+ * connected and 1 otherwise.
+ * Manually set dev_num to 3 here, so that all the
+ * device entries can be initialized statically at
+ * boot.
+ */
+ dev_num = 3;
+ spec->dev_num = 3;
+ } else if (spec->dyn_pcm_assign && codec->dp_mst) {
+ dev_num = snd_hda_get_num_devices(codec, pin_nid) + 1;
+ /*
+ * spec->dev_num is the maximum number of device entries
+ * among all the pins
+ */
+ spec->dev_num = (spec->dev_num > dev_num) ?
+ spec->dev_num : dev_num;
+ } else {
+ /*
+ * If the platform doesn't support DP MST,
+ * manually set dev_num to 1. This means
+ * the pin has only one device entry.
+ */
+ dev_num = 1;
+ spec->dev_num = 1;
}
- per_pin->pin_nid_idx = pin_idx;
- err = hdmi_read_pin_conn(codec, pin_idx);
- if (err < 0)
- return err;
+ for (i = 0; i < dev_num; i++) {
+ pin_idx = spec->num_pins;
+ per_pin = snd_array_new(&spec->pins);
- spec->num_pins++;
+ if (!per_pin)
+ return -ENOMEM;
+
+ if (spec->dyn_pcm_assign) {
+ per_pin->pcm = NULL;
+ per_pin->pcm_idx = -1;
+ } else {
+ per_pin->pcm = get_hdmi_pcm(spec, pin_idx);
+ per_pin->pcm_idx = pin_idx;
+ }
+ per_pin->pin_nid = pin_nid;
+ per_pin->pin_nid_idx = spec->num_nids;
+ per_pin->dev_id = i;
+ per_pin->non_pcm = false;
+ snd_hda_set_dev_select(codec, pin_nid, i);
+ if (is_haswell_plus(codec))
+ intel_haswell_fixup_connect_list(codec, pin_nid);
+ err = hdmi_read_pin_conn(codec, pin_idx);
+ if (err < 0)
+ return err;
+ spec->num_pins++;
+ }
+ spec->num_nids++;
return 0;
}
@@ -1744,7 +1868,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
/* Todo: add DP1.2 MST audio support later */
if (codec_has_acomp(codec))
- snd_hdac_sync_audio_rate(&codec->core, pin_nid, -1,
+ snd_hdac_sync_audio_rate(&codec->core, pin_nid, per_pin->dev_id,
runtime->rate);
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
@@ -1762,6 +1886,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
pinctl | PIN_OUT);
}
+ /* snd_hda_set_dev_select() has been called before */
err = spec->ops.setup_stream(codec, cvt_nid, pin_nid,
stream_tag, format);
mutex_unlock(&spec->pcm_lock);
@@ -1897,17 +2022,23 @@ static bool is_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
static int generic_hdmi_build_pcms(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
- int pin_idx;
+ int idx;
- for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ /*
+ * For non-MST mode, the PCM number is the same as before.
+ * For DP MST mode, the PCM number is (nid number + dev_num - 1),
+ * where dev_num is the number of device entries on a pin;
+ * e.g. 3 real pins with up to 3 device entries each give
+ * 3 + 3 - 1 = 5 PCMs.
+ */
+ for (idx = 0; idx < spec->num_nids + spec->dev_num - 1; idx++) {
struct hda_pcm *info;
struct hda_pcm_stream *pstr;
- info = snd_hda_codec_pcm_new(codec, "HDMI %d", pin_idx);
+ info = snd_hda_codec_pcm_new(codec, "HDMI %d", idx);
if (!info)
return -ENOMEM;
- spec->pcm_rec[pin_idx].pcm = info;
+ spec->pcm_rec[idx].pcm = info;
spec->pcm_used++;
info->pcm_type = HDA_PCM_TYPE_HDMI;
info->own_chmap = true;
@@ -1915,6 +2046,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
pstr->substreams = 1;
pstr->ops = generic_ops;
+ /* the PCM index must stay below 16, the size of pcm_rec[] */
+ if (spec->pcm_used >= 16)
+ break;
/* other pstr fields are set in open */
}
@@ -2070,7 +2204,9 @@ static int generic_hdmi_init(struct hda_codec *codec)
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
hda_nid_t pin_nid = per_pin->pin_nid;
+ int dev_id = per_pin->dev_id;
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
hdmi_init_pin(codec, pin_nid);
if (!codec_has_acomp(codec))
snd_hda_jack_detect_enable_callback(codec, pin_nid,
@@ -2178,6 +2314,7 @@ static int alloc_generic_hdmi(struct hda_codec *codec)
return -ENOMEM;
spec->ops = generic_standard_hdmi_ops;
+ spec->dev_num = 1; /* initialize to 1 */
mutex_init(&spec->pcm_lock);
snd_hdac_register_chmap_ops(&codec->core, &spec->chmap);
@@ -2295,6 +2432,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
{
struct hda_codec *codec = audio_ptr;
int pin_nid;
+ int dev_id = pipe;
/* we assume only from port-B to port-D */
if (port < 1 || port > 3)
@@ -2321,7 +2459,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
return;
snd_hdac_i915_set_bclk(&codec->bus->core);
- check_presence_and_report(codec, pin_nid);
+ check_presence_and_report(codec, pin_nid, dev_id);
}
/* register i915 component pin_eld_notify callback */
@@ -2354,11 +2492,13 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
hda_nid_t cvt_nid)
{
if (per_pin) {
+ snd_hda_set_dev_select(codec, per_pin->pin_nid,
+ per_pin->dev_id);
intel_verify_pin_cvt_connect(codec, per_pin);
intel_not_share_assigned_cvt(codec, per_pin->pin_nid,
- per_pin->mux_idx);
+ per_pin->dev_id, per_pin->mux_idx);
} else {
- intel_not_share_assigned_cvt_nid(codec, 0, cvt_nid);
+ intel_not_share_assigned_cvt_nid(codec, 0, 0, cvt_nid);
}
}
@@ -2378,6 +2518,8 @@ static int patch_i915_hsw_hdmi(struct hda_codec *codec)
if (err < 0)
return err;
spec = codec->spec;
+ codec->dp_mst = true;
+ spec->dyn_pcm_assign = true;
intel_haswell_enable_all_pins(codec, true);
intel_haswell_fixup_enable_dp12(codec);
@@ -2389,7 +2531,6 @@ static int patch_i915_hsw_hdmi(struct hda_codec *codec)
codec->core.link_power_control = 1;
codec->patch_ops.set_power_state = haswell_set_power_state;
- codec->dp_mst = true;
codec->depop_delay = 0;
codec->auto_runtime_pm = 1;
diff --git a/tools/testing/selftests/drivers/gpu/drm_mm.sh b/tools/testing/selftests/drivers/gpu/drm_mm.sh
new file mode 100755
index 000000000000..96dd55c92799
--- /dev/null
+++ b/tools/testing/selftests/drivers/gpu/drm_mm.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# Runs API tests for struct drm_mm (DRM range manager)
+
+if ! /sbin/modprobe -n -q test-drm_mm; then
+ echo "drivers/gpu/drm_mm: [skip]"
+ exit 77
+fi
+
+if /sbin/modprobe -q test-drm_mm; then
+ /sbin/modprobe -q -r test-drm_mm
+ echo "drivers/gpu/drm_mm: ok"
+else
+ echo "drivers/gpu/drm_mm: [FAIL]"
+ exit 1
+fi
diff --git a/tools/testing/selftests/lib/prime_numbers.sh b/tools/testing/selftests/lib/prime_numbers.sh
new file mode 100755
index 000000000000..da4cbcd766f5
--- /dev/null
+++ b/tools/testing/selftests/lib/prime_numbers.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# Checks fast/slow prime_number generation for inconsistencies
+
+if ! /sbin/modprobe -q -r prime_numbers; then
+ echo "prime_numbers: [SKIP]"
+ exit 77
+fi
+
+if /sbin/modprobe -q prime_numbers selftest=65536; then
+ /sbin/modprobe -q -r prime_numbers
+ echo "prime_numbers: ok"
+else
+ echo "prime_numbers: [FAIL]"
+ exit 1
+fi