author    Linus Torvalds <torvalds@linux-foundation.org>  2022-05-25 16:18:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-05-25 16:18:27 -0700
commit    2518f226c60d8e04d18ba4295500a5b0b8ac7659 (patch)
tree      e74de5ca0db01398cbb0c34376f74a81d7583c75 /drivers/gpu/drm/i915
parent    Merge tag 'devicetree-for-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux (diff)
parent    Merge tag 'drm-intel-next-fixes-2022-05-24' of git://anongit.freedesktop.org/drm/drm-intel into drm-next (diff)
Merge tag 'drm-next-2022-05-25' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "Intel have enabled DG2 on certain SKUs for laptops, AMD has started
  some new GPU support, msm has user allocated VA controls.

  dma-buf:
   - add dma_resv_replace_fences
   - add dma_resv_get_singleton
   - make dma_excl_fence private

  core:
   - EDID parser refactorings
   - switch drivers to drm_mode_copy/duplicate
   - DRM managed mutex initialization

  display-helper:
   - put HDMI, SCDC, HDCP, DSC and DP into new module

  gem:
   - rework fence handling

  ttm:
   - rework bulk move handling
   - add common debugfs for resource managers
   - convert to kvcalloc

  format helpers:
   - support monochrome formats
   - RGB888, RGB565 to XRGB8888 conversions

  fbdev:
   - cfb/sys_imageblit fixes
   - pagelist corruption fix
   - create offb platform device
   - deferred io improvements

  sysfb:
   - Kconfig rework
   - support for VESA mode selection

  bridge:
   - conversions to devm_drm_of_get_bridge
   - conversions to panel_bridge
   - analogix_dp - autosuspend support
   - it66121 - audio support
   - tc358767 - DSI to DPI support
   - icn6211 - PLL/I2C fixes, DT property
   - adv7611 - enable DRM_BRIDGE_OP_HPD
   - anx7625 - fill ELD if no monitor
   - dw_hdmi - add audio support
   - lontium LT9211 support, i.MX8MP LDB
   - it6505: Kconfig fix, DPCD set power fix
   - adv7511 - CEC support for ADV7535

  panel:
   - ltk035c5444t, B133UAN01, NV3052C panel support
   - DataImage FG040346DSSWBG04 support
   - st7735r - DT bindings fix
   - ssd130x - fixes

  i915:
   - DG2 laptop PCI-IDs ("motherboard down")
   - Initial RPL-P PCI IDs
   - compute engine ABI
   - DG2 Tile4 support
   - DG2 CCS clear color compression support
   - DG2 render/media compression formats support
   - ATS-M platform info
   - RPL-S PCI IDs added
   - Bump ADL-P DMC version to v2.16
   - Support static DRRS
   - Support multiple eDP/LVDS native mode refresh rates
   - DP HDR support for HSW+
   - Lots of display refactoring + fixes
   - GuC hwconfig support and query
   - sysfs support for multi-tile
   - fdinfo per-client gpu utilisation
   - add geometry subslices query
   - fix prime mmap with LMEM
   - fix vm open count and remove vma refcounts
   - contiguous allocation fixes
   - steered register write support
   - small PCI BAR enablement
   - GuC error capture support
   - sunset igpu legacy mmap support for newer devices
   - GuC version 70.1.1 support

  amdgpu:
   - Initial SoC21 support
   - SMU 13.x enablement
   - SMU 13.0.4 support
   - ttm_eu cleanups
   - USB-C, GPUVM updates
   - TMZ fixes for RV
   - RAS support for VCN
   - PM sysfs code cleanup
   - DC FP rework
   - extend CG/PG flags to 64-bit
   - SI dpm lockdep fix
   - runtime PM fixes

  amdkfd:
   - RAS/SVM fixes
   - TLB flush fixes
   - CRIU GWS support
   - ignore bogus MEC signals more efficiently

  msm:
   - Fourcc modifier for tiled but not compressed layouts
   - Support for userspace allocated IOVA (GPU virtual address)
   - DPU: DSC (Display Stream Compression) support
   - DP: eDP support
   - DP: conversion to use drm_bridge and drm_bridge_connector
   - Merge DPU1 and MDP5 MDSS driver
   - DPU: writeback support

  nouveau:
   - make some structures static
   - make some variables static
   - switch to drm_gem_plane_helper_prepare_fb

  radeon:
   - misc fixes/cleanups

  mxsfb:
   - rework crtc mode setting
   - LCDIF CRC support

  etnaviv:
   - fencing improvements
   - fix address space collisions
   - cleanup MMU reference handling

  gma500:
   - GEM/GTT improvements
   - connector handling fixes

  komeda:
   - switch to plane reset helper

  mediatek:
   - MIPI DSI improvements

  omapdrm:
   - GEM improvements

  qxl:
   - aarch64 support

  vc4:
   - add a CL submission tracepoint
   - HDMI YUV support
   - HDMI/clock improvements
   - drop is_hdmi caching

  virtio:
   - remove restriction of non-zero blob types

  vmwgfx:
   - support for cursormob and cursorbypass 4
   - fence improvements

  tidss:
   - reset DISPC on startup

  solomon:
   - SPI support
   - DT improvements

  sun4i:
   - Allwinner D1 support
   - drop is_hdmi caching

  imx:
   - use swap() instead of open-coding
   - use devm_platform_ioremap_resource
   - remove redundant initializations

  ast:
   - DisplayPort support

  rockchip:
   - Refactor IOMMU initialisation
   - make some structures static
   - replace drm_detect_hdmi_monitor with drm_display_info.is_hdmi
   - support swapped YUV formats
   - clock improvements
   - rk3568 support
   - VOP2 support

  mediatek:
   - MT8186 support

  tegra:
   - debuggability improvements"

* tag 'drm-next-2022-05-25' of git://anongit.freedesktop.org/drm/drm: (1740 commits)
  drm/i915/dsi: fix VBT send packet port selection for ICL+
  drm/i915/uc: Fix undefined behavior due to shift overflowing the constant
  drm/i915/reg: fix undefined behavior due to shift overflowing the constant
  drm/i915/gt: Fix use of static in macro mismatch
  drm/i915/audio: fix audio code enable/disable pipe logging
  drm/i915: Fix CFI violation with show_dynamic_id()
  drm/i915: Fix 'mixing different enum types' warnings in intel_display_power.c
  drm/i915/gt: Fix build error without CONFIG_PM
  drm/msm/dpu: handle pm_runtime_get_sync() errors in bind path
  drm/msm/dpu: add DRM_MODE_ROTATE_180 back to supported rotations
  drm/msm: don't free the IRQ if it was not requested
  drm/msm/dpu: limit writeback modes according to max_linewidth
  drm/amd: Don't reset dGPUs if the system is going to s2idle
  drm/amdgpu: Unmap legacy queue when MES is enabled
  drm: msm: fix possible memory leak in mdp5_crtc_cursor_set()
  drm/msm: Fix fb plane offset calculation
  drm/msm/a6xx: Fix refcount leak in a6xx_gpu_init
  drm/msm/dsi: don't powerup at modeset time for parade-ps8640
  drm/rockchip: Change register space names in vop2
  dt-bindings: display: rockchip: make reg-names mandatory for VOP2
  ...
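To make the dma-buf items above concrete, here is a minimal sketch of the newly added dma_resv_get_singleton() in use. The wait_for_writes() wrapper is hypothetical; only the dma_resv/dma_fence calls are the real 5.19 API, and the locking context is simplified (the helper expects the reservation lock to be held).

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Hypothetical wrapper: wait for all write activity on a dma-buf. */
static int wait_for_writes(struct dma_resv *resv)
{
	struct dma_fence *fence = NULL;
	int ret;

	/* merge every write fence into one representative fence */
	ret = dma_resv_get_singleton(resv, DMA_RESV_USAGE_WRITE, &fence);
	if (ret)
		return ret;

	if (fence) {
		/* interruptible wait, then drop our reference */
		ret = dma_fence_wait(fence, true);
		dma_fence_put(fence);
	}

	return ret;
}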
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 44
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 20
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 76
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 39
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 143
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 45
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 828
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 312
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 96
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 95
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 962
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 302
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 4852
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 163
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c | 1501
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 1912
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.h | 173
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 131
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_regs.h | 46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 569
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 54
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 240
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 410
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c | 406
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.h | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 70
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 113
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 144
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 363
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.h | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c | 99
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 186
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_qp_tables.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 89
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 45
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 266
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 71
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 46
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c | 52
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 147
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 54
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gemfs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 167
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 24
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_regs.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 71
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 696
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 31
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gsc.c | 224
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gsc.h | 37
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 224
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 41
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_debugfs.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_debugfs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_gmch.c | 654
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_gmch.h | 46
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 94
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs.c | 122
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs.h | 34
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 602
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 68
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 67
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_hwconfig.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 114
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.h | 32
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_migrate.c | 385
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 139
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps_types.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 61
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.h | 50
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 86
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 63
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_migrate.c | 259
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h | 218
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 48
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 185
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 1657
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h | 33
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 92
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c | 164
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 130
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 59
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 664
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 32
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile | 30
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 89
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 55
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 340
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 128
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 1055
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 82
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 1097
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 400
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 148
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_deps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.c | 159
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.h | 68
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 155
-rw-r--r--  drivers/gpu/drm/i915/i915_file_private.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 89
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 316
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 69
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_perf_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 94
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 200
-rw-r--r--  drivers/gpu/drm/i915/i915_reg_defs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 310
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 37
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 127
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_resource.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_resource.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 252
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.h | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 1292
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pcode.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 545
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_step.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 113
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 7
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 18
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 13
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_region.c | 4
-rw-r--r--  drivers/gpu/drm/i915/vlv_suspend.c | 3
282 files changed, 18965 insertions, 13348 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 98c5450b8eac..7ae3b7d67fcf 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -4,13 +4,16 @@ config DRM_I915
depends on DRM
depends on X86 && PCI
depends on !PREEMPT_RT
- select INTEL_GTT
+ select INTEL_GTT if X86
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
- select DRM_DP_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDCP_HELPER
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
@@ -30,6 +33,7 @@ config DRM_I915
select VMAP_PFN
select DRM_TTM
select DRM_BUDDY
+ select AUXILIARY_BUS
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
@@ -102,40 +106,30 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
- bool "Enable Intel GVT-g graphics virtualization host support"
+ bool
+
+config DRM_I915_GVT_KVMGT
+ tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
depends on X86
depends on 64BIT
- default n
+ depends on KVM
+ depends on VFIO_MDEV
+ select DRM_I915_GVT
+ select KVM_EXTERNAL_WRITE_TRACKING
+
help
Choose this option if you want to enable Intel GVT-g graphics
virtualization technology host support with integrated graphics.
With GVT-g, it's possible to have one integrated graphics
- device shared by multiple VMs under different hypervisors.
-
- Note that at least one hypervisor like Xen or KVM is required for
- this driver to work, and it only supports newer device from
- Broadwell+. For further information and setup guide, you can
- visit: http://01.org/igvt-g.
+ device shared by multiple VMs under KVM.
- Now it's just a stub to support the modifications of i915 for
- GVT device model. It requires at least one MPT modules for Xen/KVM
- and other components of GVT device model to work. Use it under
- you own risk.
+ Note that this driver only supports newer device from Broadwell on.
+ For further information and setup guide, you can visit:
+ http://01.org/igvt-g.
If in doubt, say "N".
-config DRM_I915_GVT_KVMGT
- tristate "Enable KVM/VFIO support for Intel GVT-g"
- depends on DRM_I915_GVT
- depends on KVM
- depends on VFIO_MDEV
- select KVM_EXTERNAL_WRITE_TRACKING
- default n
- help
- Choose this option if you want to enable KVMGT support for
- Intel GVT-g.
-
config DRM_I915_PXP
bool "Enable Intel PXP support"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 7df74a71d454..d2b18f03a33c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,6 +33,7 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# core driver code
i915-y += i915_driver.o \
+ i915_drm_client.o \
i915_config.o \
i915_getparam.o \
i915_ioctl.o \
@@ -106,6 +107,8 @@ gt-y += \
gt/intel_gt_pm_debugfs.o \
gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
+ gt/intel_gt_sysfs.o \
+ gt/intel_gt_sysfs_pm.o \
gt/intel_gtt.o \
gt/intel_llc.o \
gt/intel_lrc.o \
@@ -125,6 +128,8 @@ gt-y += \
gt/intel_workarounds.o \
gt/shmem_utils.o \
gt/sysfs_engines.o
+# x86 intel-gtt module support
+gt-$(CONFIG_X86) += gt/intel_gt_gmch.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -185,9 +190,11 @@ i915-y += gt/uc/intel_uc.o \
gt/uc/intel_uc_fw.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
+ gt/uc/intel_guc_capture.o \
gt/uc/intel_guc_ct.o \
gt/uc/intel_guc_debugfs.o \
gt/uc/intel_guc_fw.o \
+ gt/uc/intel_guc_hwconfig.o \
gt/uc/intel_guc_log.o \
gt/uc/intel_guc_log_debugfs.o \
gt/uc/intel_guc_rc.o \
@@ -197,6 +204,9 @@ i915-y += gt/uc/intel_uc.o \
gt/uc/intel_huc_debugfs.o \
gt/uc/intel_huc_fw.o
+# graphics system controller (GSC) support
+i915-y += gt/intel_gsc.o
+
# modesetting core code
i915-y += \
display/hsw_ips.o \
@@ -213,6 +223,8 @@ i915-y += \
display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
+ display/intel_display_power_map.o \
+ display/intel_display_power_well.o \
display/intel_dmc.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
@@ -320,13 +332,13 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o
-ifeq ($(CONFIG_DRM_I915_GVT),y)
-i915-y += intel_gvt.o
+i915-$(CONFIG_DRM_I915_GVT) += \
+ intel_gvt.o \
+ intel_gvt_mmio_table.o
include $(src)/gvt/Makefile
-endif
obj-$(CONFIG_DRM_I915) += i915.o
-obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
# header test
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index f67bbaaad8e0..5a957acebfd6 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -5,12 +5,15 @@
* DisplayPort support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code).
*/
+#include <linux/string_helpers.h>
+
#include "g4x_dp.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -22,58 +25,37 @@
#include "intel_pps.h"
#include "vlv_sideband.h"
-struct dp_link_dpll {
- int clock;
- struct dpll dpll;
+static const struct dpll g4x_dpll[] = {
+ { .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8, },
+ { .dot = 270000, .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2, },
};
-static const struct dp_link_dpll g4x_dpll[] = {
- { 162000,
- { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
- { 270000,
- { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+static const struct dpll pch_dpll[] = {
+ { .dot = 162000, .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9, },
+ { .dot = 270000, .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8, },
};
-static const struct dp_link_dpll pch_dpll[] = {
- { 162000,
- { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
- { 270000,
- { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+static const struct dpll vlv_dpll[] = {
+ { .dot = 162000, .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81, },
+ { .dot = 270000, .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27, },
};
-static const struct dp_link_dpll vlv_dpll[] = {
- { 162000,
- { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
- { 270000,
- { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
-};
-
-/*
- * CHV supports eDP 1.4 that have more link rates.
- * Below only provides the fixed rate but exclude variable rate.
- */
-static const struct dp_link_dpll chv_dpll[] = {
- /*
- * CHV requires to program fractional division for m2.
- * m2 is stored in fixed point format using formula below
- * (m2_int << 22) | m2_fraction
- */
- { 162000, /* m2_int = 32, m2_fraction = 1677722 */
- { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
- { 270000, /* m2_int = 27, m2_fraction = 0 */
- { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
+static const struct dpll chv_dpll[] = {
+ /* m2 is .22 binary fixed point */
+ { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+ { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
};
const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
{
- return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
+ return IS_CHERRYVIEW(i915) ? &chv_dpll[0] : &vlv_dpll[0];
}
void g4x_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct dp_link_dpll *divisor = NULL;
+ const struct dpll *divisor = NULL;
int i, count = 0;
if (IS_G4X(dev_priv)) {
@@ -92,8 +74,8 @@ void g4x_dp_set_clock(struct intel_encoder *encoder,
if (divisor && count) {
for (i = 0; i < count; i++) {
- if (pipe_config->port_clock == divisor[i].clock) {
- pipe_config->dpll = divisor[i].dpll;
+ if (pipe_config->port_clock == divisor[i].dot) {
+ pipe_config->dpll = divisor[i];
pipe_config->clock_set = true;
break;
}
@@ -192,7 +174,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
I915_STATE_WARN(cur_state != state,
"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- onoff(state), onoff(cur_state));
+ str_on_off(state), str_on_off(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -202,7 +184,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
+ str_on_off(state), str_on_off(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
@@ -514,9 +496,7 @@ static void intel_disable_dp(struct intel_atomic_state *state,
intel_dp->link_trained = false;
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
/*
* Make sure the panel is off before trying to change the mode.
@@ -677,9 +657,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
- enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
@@ -713,11 +691,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
- if (pipe_config->has_audio) {
- drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
- pipe_name(pipe));
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
- }
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void g4x_enable_dp(struct intel_atomic_state *state,
@@ -1402,7 +1376,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->pipe_mask = BIT(PIPE_C);
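The chv_dpll hunk above replaces the old m2_int/m2_fraction comments with a single ".22 binary fixed point" note. A few lines of illustrative arithmetic (not part of the patch) confirm the two descriptions agree:

#include <stdio.h>

int main(void)
{
	unsigned int m2 = 0x819999a;                  /* CHV 162000 kHz entry */
	unsigned int m2_int = m2 >> 22;               /* integer part: 32 */
	unsigned int m2_frac = m2 & ((1u << 22) - 1); /* fraction: 1677722 */

	/* 32 + 1677722/4194304 ~= 32.4, matching the old m2_int/m2_fraction split */
	printf("m2 = %.1f\n", m2_int + (double)m2_frac / (1u << 22));
	return 0;
}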
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 06e00b1eaa7c..5fbd2ae95869 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -10,6 +10,7 @@
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
@@ -143,19 +144,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
&pipe_config->infoframes.hdmi);
}
-static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
-
- drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink);
- drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n",
- pipe_name(crtc->pipe));
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
-}
-
static void g4x_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -175,8 +163,9 @@ static void g4x_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- if (pipe_config->has_audio)
- intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+ !pipe_config->has_hdmi_sink);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_atomic_state *state,
@@ -227,8 +216,9 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
- if (pipe_config->has_audio)
- intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+ !pipe_config->has_hdmi_sink);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_atomic_state *state,
@@ -281,8 +271,9 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
- if (pipe_config->has_audio)
- intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+ !pipe_config->has_hdmi_sink);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_atomic_state *state,
@@ -356,9 +347,7 @@ static void g4x_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
@@ -368,9 +357,7 @@ static void pch_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
static void pch_post_disable_hdmi(struct intel_atomic_state *state,
@@ -588,7 +575,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
intel_encoder->type = INTEL_OUTPUT_HDMI;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index a87b65cd41fd..7fe1a4e57654 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -418,9 +418,6 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
plane_state->view.color_plane[0].mapping_stride);
@@ -441,8 +438,6 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
DISP_HEIGHT(crtc_h - 1) | DISP_WIDTH(crtc_w - 1));
}
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_plane_update_arm(struct intel_plane *plane,
@@ -454,7 +449,6 @@ static void i9xx_plane_update_arm(struct intel_plane *plane,
int x = plane_state->view.color_plane[0].x;
int y = plane_state->view.color_plane[0].y;
u32 dspcntr, dspaddr_offset, linear_offset;
- unsigned long irqflags;
dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
@@ -465,8 +459,6 @@ static void i9xx_plane_update_arm(struct intel_plane *plane,
else
dspaddr_offset = linear_offset;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
@@ -496,14 +488,13 @@ static void i9xx_plane_update_arm(struct intel_plane *plane,
* the control register just before the surface register.
*/
intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+
if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
else
intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i830_plane_update_arm(struct intel_plane *plane,
@@ -525,7 +516,6 @@ static void i9xx_plane_disable_arm(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- unsigned long irqflags;
u32 dspcntr;
/*
@@ -540,15 +530,12 @@ static void i9xx_plane_disable_arm(struct intel_plane *plane,
*/
dspcntr = i9xx_plane_ctl_crtc(crtc_state);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+
if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
else
intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -561,16 +548,14 @@ g4x_primary_async_flip(struct intel_plane *plane,
u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- unsigned long irqflags;
if (async_flip)
dspcntr |= DISP_ASYNC_FLIP;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+
intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -582,12 +567,9 @@ vlv_primary_async_flip(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 13b07c6fd6be..19bf717fd4cb 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -25,6 +25,7 @@
* Jani Nikula <jani.nikula@intel.com>
*/
+#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
@@ -399,8 +400,8 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
intel_dsi->io_wakeref[port] =
intel_display_power_get(dev_priv,
port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO);
+ POWER_DOMAIN_PORT_DDI_IO_A :
+ POWER_DOMAIN_PORT_DDI_IO_B);
}
}
@@ -1425,8 +1426,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
intel_display_power_put(dev_priv,
port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_IO_A :
+ POWER_DOMAIN_PORT_DDI_IO_B,
wakeref);
}
@@ -1967,6 +1968,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
static void icl_dsi_add_properties(struct intel_connector *connector)
{
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(connector);
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
@@ -1979,9 +1982,9 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
drm_connector_set_panel_orientation_with_quirk(&connector->base,
- intel_dsi_get_panel_orientation(connector),
- connector->panel.fixed_mode->hdisplay,
- connector->panel.fixed_mode->vdisplay);
+ intel_dsi_get_panel_orientation(connector),
+ fixed_mode->hdisplay,
+ fixed_mode->vdisplay);
}
void icl_dsi_init(struct drm_i915_private *dev_priv)
@@ -1991,7 +1994,6 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *fixed_mode;
enum port port;
if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -2048,15 +2050,16 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, encoder);
mutex_lock(&dev->mode_config.mutex);
- fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
- if (!fixed_mode) {
+ if (!intel_panel_preferred_fixed_mode(intel_connector)) {
drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
goto err;
}
- intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_init(intel_connector);
+
intel_backlight_setup(intel_connector, INVALID_PIPE);
if (dev_priv->vbt.dsi.config->dual_link)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 5712688232fb..efe8591619e3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -181,29 +181,67 @@ unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
}
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+ const struct intel_plane_state *plane_state,
+ int color_plane)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int cpp;
- unsigned int pixel_rate;
if (!plane_state->uapi.visible)
return 0;
- pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+ return intel_plane_pixel_rate(crtc_state, plane_state) *
+ fb->format->cpp[color_plane];
+}
+
+static bool
+use_min_ddb(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(i915) >= 13 &&
+ crtc_state->uapi.async_flip &&
+ plane->async_flip;
+}
+
+static unsigned int
+intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int width, height;
- cpp = fb->format->cpp[0];
+ if (plane->id == PLANE_CURSOR)
+ return 0;
+
+ if (!plane_state->uapi.visible)
+ return 0;
/*
- * Based on HSD#:1408715493
- * NV12 cpp == 4, P010 cpp == 8
- *
- * FIXME what is the logic behind this?
+ * We calculate extra ddb based on ratio plane rate/total data rate
+ * in case, in some cases we should not allocate extra ddb for the plane,
+ * so do not count its data rate, if this is the case.
*/
- if (fb->format->is_yuv && fb->format->num_planes > 1)
- cpp *= 4;
+ if (use_min_ddb(crtc_state, plane))
+ return 0;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ height = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ /* UV plane does 1/2 pixel sub-sampling */
+ if (color_plane == 1) {
+ width /= 2;
+ height /= 2;
+ }
- return pixel_rate * cpp;
+ return width * height * fb->format->cpp[color_plane];
}
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
@@ -326,6 +364,9 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
crtc_state->nv12_planes &= ~BIT(plane->id);
crtc_state->c8_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->data_rate_y[plane->id] = 0;
+ crtc_state->rel_data_rate[plane->id] = 0;
+ crtc_state->rel_data_rate_y[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
plane_state->uapi.visible = false;
@@ -551,8 +592,27 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
new_crtc_state->update_planes |= BIT(plane->id);
- new_crtc_state->data_rate[plane->id] =
- intel_plane_data_rate(new_crtc_state, new_plane_state);
+ if (new_plane_state->uapi.visible &&
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ new_crtc_state->data_rate_y[plane->id] =
+ intel_plane_data_rate(new_crtc_state, new_plane_state, 0);
+ new_crtc_state->data_rate[plane->id] =
+ intel_plane_data_rate(new_crtc_state, new_plane_state, 1);
+
+ new_crtc_state->rel_data_rate_y[plane->id] =
+ intel_plane_relative_data_rate(new_crtc_state,
+ new_plane_state, 0);
+ new_crtc_state->rel_data_rate[plane->id] =
+ intel_plane_relative_data_rate(new_crtc_state,
+ new_plane_state, 1);
+ } else if (new_plane_state->uapi.visible) {
+ new_crtc_state->data_rate[plane->id] =
+ intel_plane_data_rate(new_crtc_state, new_plane_state, 0);
+
+ new_crtc_state->rel_data_rate[plane->id] =
+ intel_plane_relative_data_rate(new_crtc_state,
+ new_plane_state, 0);
+ }
return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
old_plane_state, new_plane_state);
@@ -616,8 +676,8 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
static struct intel_plane *
skl_next_plane_to_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc,
- struct skl_ddb_entry entries_y[I915_MAX_PLANES],
- struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+ struct skl_ddb_entry ddb[I915_MAX_PLANES],
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES],
unsigned int *update_mask)
{
struct intel_crtc_state *crtc_state =
@@ -636,17 +696,15 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
!(*update_mask & BIT(plane_id)))
continue;
- if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
- entries_y,
- I915_MAX_PLANES, plane_id) ||
- skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
- entries_uv,
- I915_MAX_PLANES, plane_id))
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb[plane_id],
+ ddb, I915_MAX_PLANES, plane_id) ||
+ skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+ ddb_y, I915_MAX_PLANES, plane_id))
continue;
*update_mask &= ~BIT(plane_id);
- entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
- entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+ ddb[plane_id] = crtc_state->wm.skl.plane_ddb[plane_id];
+ ddb_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
return plane;
}
@@ -728,19 +786,17 @@ static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_ddb_entry entries_y[I915_MAX_PLANES];
- struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
u32 update_mask = new_crtc_state->update_planes;
struct intel_plane *plane;
- memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+ memcpy(ddb, old_crtc_state->wm.skl.plane_ddb,
+ sizeof(old_crtc_state->wm.skl.plane_ddb));
+ memcpy(ddb_y, old_crtc_state->wm.skl.plane_ddb_y,
sizeof(old_crtc_state->wm.skl.plane_ddb_y));
- memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
- sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
- while ((plane = skl_next_plane_to_commit(state, crtc,
- entries_y, entries_uv,
- &update_mask))) {
+ while ((plane = skl_next_plane_to_commit(state, crtc, ddb, ddb_y, &update_mask))) {
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, plane);
@@ -802,8 +858,8 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_rect *src = &plane_state->uapi.src;
struct drm_rect *dst = &plane_state->uapi.dst;
+ const struct drm_rect *clip = &crtc_state->pipe_src;
unsigned int rotation = plane_state->hw.rotation;
- struct drm_rect clip = {};
int hscale, vscale;
if (!fb) {
@@ -823,31 +879,25 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return -ERANGE;
}
- if (crtc_state->hw.enable) {
- clip.x2 = crtc_state->pipe_src_w;
- clip.y2 = crtc_state->pipe_src_h;
- }
-
- /* right side of the image is on the slave crtc, adjust dst to match */
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- drm_rect_translate(dst, -crtc_state->pipe_src_w, 0);
-
/*
* FIXME: This might need further adjustment for seamless scaling
* with phase information, for the 2p2 and 2p1 scenarios.
*/
- plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, &clip);
+ plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, clip);
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
if (!can_position && plane_state->uapi.visible &&
- !drm_rect_equals(dst, &clip)) {
+ !drm_rect_equals(dst, clip)) {
drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n");
drm_rect_debug_print("dst: ", dst, false);
- drm_rect_debug_print("clip: ", &clip, false);
+ drm_rect_debug_print("clip: ", clip, false);
return -EINVAL;
}
+ /* final plane coordinates will be relative to the plane's pipe */
+ drm_rect_translate(dst, -clip->x1, -clip->y1);
+
return 0;
}
@@ -997,7 +1047,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret < 0)
goto unpin_fb;
- dma_resv_iter_begin(&cursor, obj->base.resv, false);
+ dma_resv_iter_begin(&cursor, obj->base.resv,
+ DMA_RESV_USAGE_WRITE);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
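A worked example of the per-color-plane data rate introduced in this file (illustrative numbers, not from the patch): for a visible 1920x1080 NV12 plane, cpp is 1 for the Y plane and 2 for the interleaved CbCr plane, and the UV plane is subsampled by two in each direction:

#include <stdio.h>

int main(void)
{
	int width = 1920, height = 1080;

	/* color_plane 0: full-resolution Y, 1 byte per pixel */
	unsigned int rate_y = width * height * 1;              /* 2073600 */

	/* color_plane 1: CbCr at half width/height, 2 bytes per sample */
	unsigned int rate_uv = (width / 2) * (height / 2) * 2; /* 1036800 */

	printf("Y: %u, UV: %u, total: %u bytes per frame\n",
	       rate_y, rate_uv, rate_y + rate_uv);
	return 0;
}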
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index f4763a53541e..74b6d3b169a7 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -25,7 +25,8 @@ unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
+ const struct intel_plane_state *plane_state,
+ int color_plane);
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state,
struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 3bdca0fe2cee..f0f0dfce27ce 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -337,8 +337,6 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 eldv, tmp;
- drm_dbg_kms(&dev_priv->drm, "Disable audio codec\n");
-
tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
@@ -362,9 +360,6 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- drm_dbg_kms(&dev_priv->drm, "Enable audio codec, %u bytes ELD\n",
- drm_eld_size(eld));
-
tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
@@ -383,7 +378,6 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
len = min(drm_eld_size(eld) / 4, len);
- drm_dbg(&dev_priv->drm, "ELD size %d\n", len);
for (i = 0; i < len; i++)
intel_de_write(dev_priv, G4X_HDMIW_HDMIEDID,
*((const u32 *)eld + i));
@@ -501,9 +495,6 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n",
- transcoder_name(cpu_transcoder));
-
mutex_lock(&dev_priv->audio.mutex);
/* Disable timestamps */
@@ -647,10 +638,6 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- drm_dbg_kms(&dev_priv->drm,
- "Enable audio codec on transcoder %s, %u bytes ELD\n",
- transcoder_name(cpu_transcoder), drm_eld_size(eld));
-
mutex_lock(&dev_priv->audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
@@ -703,11 +690,6 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
- drm_dbg_kms(&dev_priv->drm,
- "Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- pipe_name(pipe));
-
if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
return;
@@ -754,11 +736,6 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
- drm_dbg_kms(&dev_priv->drm,
- "Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
- encoder->base.base.id, encoder->base.name,
- pipe_name(pipe), drm_eld_size(eld));
-
if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
return;
@@ -844,18 +821,20 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
+ if (!crtc_state->has_audio)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
+ connector->base.id, connector->name,
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(connector->eld));
+
/* FIXME precompute the ELD in .compute_config() */
if (!connector->eld[0])
drm_dbg_kms(&dev_priv->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- drm_dbg(&dev_priv->drm, "ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id,
- connector->name,
- encoder->base.base.id,
- encoder->base.name);
-
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
if (dev_priv->audio.funcs)
@@ -900,9 +879,17 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_connector *connector = old_conn_state->connector;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
+ if (!old_crtc_state->has_audio)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
+ connector->base.id, connector->name,
+ encoder->base.base.id, encoder->base.name, pipe_name(pipe));
+
if (dev_priv->audio.funcs)
dev_priv->audio.funcs->audio_codec_disable(encoder,
old_crtc_state,
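The shape of the audio refactor in this file, reduced to a sketch (simplified from the patch; the body is elided): the has_audio check moves out of each encoder's enable/disable path and into the codec helpers themselves, so callers such as the g4x/ibx/cpt HDMI hunks earlier can invoke them unconditionally:

void intel_audio_codec_enable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      const struct drm_connector_state *conn_state)
{
	if (!crtc_state->has_audio)	/* guard now lives in the callee */
		return;

	/* ... log the connector/encoder, fix up the ELD, call the platform hook ... */
}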
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 98f7ea44042f..c8e1fc53a881 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/pwm.h>
+#include <linux/string_helpers.h>
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -1633,7 +1634,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
drm_dbg_kms(&dev_priv->drm,
"Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->base.name,
- enableddisabled(panel->backlight.enabled),
+ str_enabled_disabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
return 0;
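
[Editor's note] The rename tracks the kernel-wide helpers in <linux/string_helpers.h>, which replace i915's local enableddisabled(). A userspace rendition of the helper's semantics (the real one is a one-line inline in that header; siblings include str_yes_no(), str_on_off() and str_enable_disable()):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same semantics as the kernel's str_enabled_disabled(). */
    static const char *str_enabled_disabled(bool v)
    {
            return v ? "enabled" : "disabled";
    }

    int main(void)
    {
            printf("backlight %s\n", str_enabled_disabled(true)); /* "enabled" */
            return 0;
    }
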
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 40b5e7ed12c2..0c5638f5b72b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,7 +25,8 @@
*
*/
-#include <drm/dp/drm_dp_helper.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dsc_helper.h>
#include "display/intel_display.h"
#include "display/intel_display_types.h"
@@ -88,7 +89,7 @@ static u32 get_blocksize(const void *block_data)
}
static const void *
-find_section(const void *_bdb, enum bdb_block_id section_id)
+find_raw_section(const void *_bdb, enum bdb_block_id section_id)
{
const struct bdb_header *bdb = _bdb;
const u8 *base = _bdb;
@@ -118,6 +119,406 @@ find_section(const void *_bdb, enum bdb_block_id section_id)
return NULL;
}
+/*
+ * Offset from the start of BDB to the start of the
+ * block data (just past the block header).
+ */
+static u32 block_offset(const void *bdb, enum bdb_block_id section_id)
+{
+ const void *block;
+
+ block = find_raw_section(bdb, section_id);
+ if (!block)
+ return 0;
+
+ return block - bdb;
+}
+
+/* size of the block excluding the header */
+static u32 block_size(const void *bdb, enum bdb_block_id section_id)
+{
+ const void *block;
+
+ block = find_raw_section(bdb, section_id);
+ if (!block)
+ return 0;
+
+ return get_blocksize(block);
+}
+
+struct bdb_block_entry {
+ struct list_head node;
+ enum bdb_block_id section_id;
+ u8 data[];
+};
+
+static const void *
+find_section(struct drm_i915_private *i915,
+ enum bdb_block_id section_id)
+{
+ struct bdb_block_entry *entry;
+
+ list_for_each_entry(entry, &i915->vbt.bdb_blocks, node) {
+ if (entry->section_id == section_id)
+ return entry->data + 3;
+ }
+
+ return NULL;
+}
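
[Editor's note] find_section() no longer walks the raw VBT on every lookup; it serves cached copies from the bdb_blocks list built once at init. Each cached entry keeps the raw 3-byte block header (a 1-byte block ID followed by a 16-bit little-endian payload size, as implied by the "+ 3" arithmetic and get_blocksize() above) in front of the payload, hence the return of entry->data + 3. A standalone sketch of that header read:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Reads the 16-bit payload size stored at bytes 1..2 of a block.
     * memcpy avoids unaligned access; this sketch assumes a little-endian
     * host, matching the VBT's on-disk byte order. */
    static uint16_t block_payload_size(const uint8_t *block)
    {
            uint16_t size;

            memcpy(&size, block + 1, sizeof(size));
            return size;
    }

    int main(void)
    {
            /* id=40, size=4, then four payload bytes */
            const uint8_t block[] = { 40, 0x04, 0x00, 1, 2, 3, 4 };

            printf("id=%d, payload=%d bytes, payload starts at offset 3\n",
                   block[0], (int)block_payload_size(block));
            return 0;
    }
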
+
+static const struct {
+ enum bdb_block_id section_id;
+ size_t min_size;
+} bdb_blocks[] = {
+ { .section_id = BDB_GENERAL_FEATURES,
+ .min_size = sizeof(struct bdb_general_features), },
+ { .section_id = BDB_GENERAL_DEFINITIONS,
+ .min_size = sizeof(struct bdb_general_definitions), },
+ { .section_id = BDB_PSR,
+ .min_size = sizeof(struct bdb_psr), },
+ { .section_id = BDB_DRIVER_FEATURES,
+ .min_size = sizeof(struct bdb_driver_features), },
+ { .section_id = BDB_SDVO_LVDS_OPTIONS,
+ .min_size = sizeof(struct bdb_sdvo_lvds_options), },
+ { .section_id = BDB_SDVO_PANEL_DTDS,
+ .min_size = sizeof(struct bdb_sdvo_panel_dtds), },
+ { .section_id = BDB_EDP,
+ .min_size = sizeof(struct bdb_edp), },
+ { .section_id = BDB_LVDS_OPTIONS,
+ .min_size = sizeof(struct bdb_lvds_options), },
+ /*
+ * BDB_LVDS_LFP_DATA depends on BDB_LVDS_LFP_DATA_PTRS,
+ * so keep the two ordered.
+ */
+ { .section_id = BDB_LVDS_LFP_DATA_PTRS,
+ .min_size = sizeof(struct bdb_lvds_lfp_data_ptrs), },
+ { .section_id = BDB_LVDS_LFP_DATA,
+ .min_size = 0, /* special case */ },
+ { .section_id = BDB_LVDS_BACKLIGHT,
+ .min_size = sizeof(struct bdb_lfp_backlight_data), },
+ { .section_id = BDB_LFP_POWER,
+ .min_size = sizeof(struct bdb_lfp_power), },
+ { .section_id = BDB_MIPI_CONFIG,
+ .min_size = sizeof(struct bdb_mipi_config), },
+ { .section_id = BDB_MIPI_SEQUENCE,
+ .min_size = sizeof(struct bdb_mipi_sequence) },
+ { .section_id = BDB_COMPRESSION_PARAMETERS,
+ .min_size = sizeof(struct bdb_compression_parameters), },
+ { .section_id = BDB_GENERIC_DTD,
+ .min_size = sizeof(struct bdb_generic_dtd), },
+};
+
+static size_t lfp_data_min_size(struct drm_i915_private *i915)
+{
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ size_t size;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return 0;
+
+ size = sizeof(struct bdb_lvds_lfp_data);
+ if (ptrs->panel_name.table_size)
+ size = max(size, ptrs->panel_name.offset +
+ sizeof(struct bdb_lvds_lfp_data_tail));
+
+ return size;
+}
+
+static bool validate_lfp_data_ptrs(const void *bdb,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs)
+{
+ int fp_timing_size, dvo_timing_size, panel_pnp_id_size, panel_name_size;
+ int data_block_size, lfp_data_size;
+ int i;
+
+ data_block_size = block_size(bdb, BDB_LVDS_LFP_DATA);
+ if (data_block_size == 0)
+ return false;
+
+ /* always 3 indicating the presence of fp_timing+dvo_timing+panel_pnp_id */
+ if (ptrs->lvds_entries != 3)
+ return false;
+
+ fp_timing_size = ptrs->ptr[0].fp_timing.table_size;
+ dvo_timing_size = ptrs->ptr[0].dvo_timing.table_size;
+ panel_pnp_id_size = ptrs->ptr[0].panel_pnp_id.table_size;
+ panel_name_size = ptrs->panel_name.table_size;
+
+ /* fp_timing has variable size */
+ if (fp_timing_size < 32 ||
+ dvo_timing_size != sizeof(struct lvds_dvo_timing) ||
+ panel_pnp_id_size != sizeof(struct lvds_pnp_id))
+ return false;
+
+ /* panel_name is not present in old VBTs */
+ if (panel_name_size != 0 &&
+ panel_name_size != sizeof(struct lvds_lfp_panel_name))
+ return false;
+
+ lfp_data_size = ptrs->ptr[1].fp_timing.offset - ptrs->ptr[0].fp_timing.offset;
+ if (16 * lfp_data_size > data_block_size)
+ return false;
+
+ /*
+ * Except for vlv/chv machines all real VBTs seem to have 6
+ * unaccounted bytes in the fp_timing table. And it doesn't
+ * appear to be a really intentional hole as the fp_timing
+ * 0xffff terminator is always within those 6 missing bytes.
+ */
+ if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size &&
+ fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size != lfp_data_size)
+ return false;
+
+ if (ptrs->ptr[0].fp_timing.offset + fp_timing_size > ptrs->ptr[0].dvo_timing.offset ||
+ ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset ||
+ ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size)
+ return false;
+
+ /* make sure the table entries have uniform size */
+ for (i = 1; i < 16; i++) {
+ if (ptrs->ptr[i].fp_timing.table_size != fp_timing_size ||
+ ptrs->ptr[i].dvo_timing.table_size != dvo_timing_size ||
+ ptrs->ptr[i].panel_pnp_id.table_size != panel_pnp_id_size)
+ return false;
+
+ if (ptrs->ptr[i].fp_timing.offset - ptrs->ptr[i-1].fp_timing.offset != lfp_data_size ||
+ ptrs->ptr[i].dvo_timing.offset - ptrs->ptr[i-1].dvo_timing.offset != lfp_data_size ||
+ ptrs->ptr[i].panel_pnp_id.offset - ptrs->ptr[i-1].panel_pnp_id.offset != lfp_data_size)
+ return false;
+ }
+
+ /* make sure the tables fit inside the data block */
+ for (i = 0; i < 16; i++) {
+ if (ptrs->ptr[i].fp_timing.offset + fp_timing_size > data_block_size ||
+ ptrs->ptr[i].dvo_timing.offset + dvo_timing_size > data_block_size ||
+ ptrs->ptr[i].panel_pnp_id.offset + panel_pnp_id_size > data_block_size)
+ return false;
+ }
+
+ if (ptrs->panel_name.offset + 16 * panel_name_size > data_block_size)
+ return false;
+
+ return true;
+}
+
+/* make the data table offsets relative to the data block */
+static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
+{
+ struct bdb_lvds_lfp_data_ptrs *ptrs = ptrs_block;
+ u32 offset;
+ int i;
+
+ offset = block_offset(bdb, BDB_LVDS_LFP_DATA);
+
+ for (i = 0; i < 16; i++) {
+ if (ptrs->ptr[i].fp_timing.offset < offset ||
+ ptrs->ptr[i].dvo_timing.offset < offset ||
+ ptrs->ptr[i].panel_pnp_id.offset < offset)
+ return false;
+
+ ptrs->ptr[i].fp_timing.offset -= offset;
+ ptrs->ptr[i].dvo_timing.offset -= offset;
+ ptrs->ptr[i].panel_pnp_id.offset -= offset;
+ }
+
+ if (ptrs->panel_name.table_size) {
+ if (ptrs->panel_name.offset < offset)
+ return false;
+
+ ptrs->panel_name.offset -= offset;
+ }
+
+ return validate_lfp_data_ptrs(bdb, ptrs);
+}
+
+static const void *find_fp_timing_terminator(const u8 *data, int size)
+{
+ int i;
+
+ for (i = 0; i < size - 1; i++) {
+ if (data[i] == 0xff && data[i+1] == 0xff)
+ return &data[i];
+ }
+
+ return NULL;
+}
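
[Editor's note] The scan above looks for the 0xff 0xff pair that terminates a VBT fp_timing table; two consecutive terminators later give the per-panel stride. A self-contained copy of the same loop, exercised on a toy buffer:

    #include <stdio.h>

    /* Same logic as find_fp_timing_terminator() above. */
    static const unsigned char *find_terminator(const unsigned char *data, int size)
    {
            int i;

            for (i = 0; i < size - 1; i++) {
                    if (data[i] == 0xff && data[i + 1] == 0xff)
                            return &data[i];
            }
            return NULL;
    }

    int main(void)
    {
            const unsigned char buf[] = { 0x10, 0x20, 0xff, 0x00, 0xff, 0xff, 0x30 };
            const unsigned char *t = find_terminator(buf, sizeof(buf));

            printf("terminator at offset %td\n", t - buf); /* prints 4 */
            return 0;
    }
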
+
+static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table,
+ int table_size, int total_size)
+{
+ if (total_size < table_size)
+ return total_size;
+
+ table->table_size = table_size;
+ table->offset = total_size - table_size;
+
+ return total_size - table_size;
+}
+
+static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next,
+ const struct lvds_lfp_data_ptr_table *prev,
+ int size)
+{
+ next->table_size = prev->table_size;
+ next->offset = prev->offset + size;
+}
+
+static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
+ const void *bdb)
+{
+ int i, size, table_size, block_size, offset;
+ const void *t0, *t1, *block;
+ struct bdb_lvds_lfp_data_ptrs *ptrs;
+ void *ptrs_block;
+
+ block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!block)
+ return NULL;
+
+ drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n");
+
+ block_size = get_blocksize(block);
+
+ size = block_size;
+ t0 = find_fp_timing_terminator(block, size);
+ if (!t0)
+ return NULL;
+
+ size -= t0 - block - 2;
+ t1 = find_fp_timing_terminator(t0 + 2, size);
+ if (!t1)
+ return NULL;
+
+ size = t1 - t0;
+ if (size * 16 > block_size)
+ return NULL;
+
+ ptrs_block = kzalloc(sizeof(*ptrs) + 3, GFP_KERNEL);
+ if (!ptrs_block)
+ return NULL;
+
+ *(u8 *)(ptrs_block + 0) = BDB_LVDS_LFP_DATA_PTRS;
+ *(u16 *)(ptrs_block + 1) = sizeof(*ptrs);
+ ptrs = ptrs_block + 3;
+
+ table_size = sizeof(struct lvds_pnp_id);
+ size = make_lfp_data_ptr(&ptrs->ptr[0].panel_pnp_id, table_size, size);
+
+ table_size = sizeof(struct lvds_dvo_timing);
+ size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size);
+
+ table_size = t0 - block + 2;
+ size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size);
+
+ if (ptrs->ptr[0].fp_timing.table_size)
+ ptrs->lvds_entries++;
+ if (ptrs->ptr[0].dvo_timing.table_size)
+ ptrs->lvds_entries++;
+ if (ptrs->ptr[0].panel_pnp_id.table_size)
+ ptrs->lvds_entries++;
+
+ if (size != 0 || ptrs->lvds_entries != 3) {
+ kfree(ptrs);
+ return NULL;
+ }
+
+ size = t1 - t0;
+ for (i = 1; i < 16; i++) {
+ next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size);
+ next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size);
+ next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size);
+ }
+
+ size = t1 - t0;
+ table_size = sizeof(struct lvds_lfp_panel_name);
+
+ if (16 * (size + table_size) <= block_size) {
+ ptrs->panel_name.table_size = table_size;
+ ptrs->panel_name.offset = size * 16;
+ }
+
+ offset = block - bdb;
+
+ for (i = 0; i < 16; i++) {
+ ptrs->ptr[i].fp_timing.offset += offset;
+ ptrs->ptr[i].dvo_timing.offset += offset;
+ ptrs->ptr[i].panel_pnp_id.offset += offset;
+ }
+
+ if (ptrs->panel_name.table_size)
+ ptrs->panel_name.offset += offset;
+
+ return ptrs_block;
+}
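
[Editor's note] generate_lfp_data_ptrs() reverse-engineers one panel entry: the distance between two consecutive fp_timing terminators gives the per-panel stride, and make_lfp_data_ptr() carves panel_pnp_id, dvo_timing and fp_timing off the end of that stride, back to front; the leftover must reach exactly zero or the generated pointers are discarded. A compressed, runnable sketch with hypothetical sizes (in the driver, the fp_timing table size is t0 - block + 2):

    #include <stdio.h>

    struct ptr_table { int table_size, offset; };

    /* Mirror of make_lfp_data_ptr(): carve table_size bytes off the end
     * of the remaining span. */
    static int make_ptr(struct ptr_table *t, int table_size, int total)
    {
            if (total < table_size)
                    return total;
            t->table_size = table_size;
            t->offset = total - table_size;
            return total - table_size;
    }

    int main(void)
    {
            struct ptr_table pnp, dvo, fp;
            int size = 62;                    /* hypothetical stride t1 - t0 */

            size = make_ptr(&pnp, 10, size);  /* lvds_pnp_id:     offset 52 */
            size = make_ptr(&dvo, 18, size);  /* lvds_dvo_timing: offset 34 */
            size = make_ptr(&fp,  34, size);  /* fp_timing:       offset 0  */

            printf("pnp@%d dvo@%d fp@%d leftover=%d\n",
                   pnp.offset, dvo.offset, fp.offset, size); /* leftover: 0 */
            return 0;
    }
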
+
+static void
+init_bdb_block(struct drm_i915_private *i915,
+ const void *bdb, enum bdb_block_id section_id,
+ size_t min_size)
+{
+ struct bdb_block_entry *entry;
+ void *temp_block = NULL;
+ const void *block;
+ size_t block_size;
+
+ block = find_raw_section(bdb, section_id);
+
+ /* Modern VBTs lack the LFP data table pointers block, make one up */
+ if (!block && section_id == BDB_LVDS_LFP_DATA_PTRS) {
+ temp_block = generate_lfp_data_ptrs(i915, bdb);
+ if (temp_block)
+ block = temp_block + 3;
+ }
+ if (!block)
+ return;
+
+ drm_WARN(&i915->drm, min_size == 0,
+ "Block %d min_size is zero\n", section_id);
+
+ block_size = get_blocksize(block);
+
+ entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
+ GFP_KERNEL);
+ if (!entry) {
+ kfree(temp_block);
+ return;
+ }
+
+ entry->section_id = section_id;
+ memcpy(entry->data, block - 3, block_size + 3);
+
+ kfree(temp_block);
+
+ drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n",
+ section_id, block_size, min_size);
+
+ if (section_id == BDB_LVDS_LFP_DATA_PTRS &&
+ !fixup_lfp_data_ptrs(bdb, entry->data + 3)) {
+ drm_err(&i915->drm, "VBT has malformed LFP data table pointers\n");
+ kfree(entry);
+ return;
+ }
+
+ list_add_tail(&entry->node, &i915->vbt.bdb_blocks);
+}
+
+static void init_bdb_blocks(struct drm_i915_private *i915,
+ const void *bdb)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bdb_blocks); i++) {
+ enum bdb_block_id section_id = bdb_blocks[i].section_id;
+ size_t min_size = bdb_blocks[i].min_size;
+
+ if (section_id == BDB_LVDS_LFP_DATA)
+ min_size = lfp_data_min_size(i915);
+
+ init_bdb_block(i915, bdb, section_id, min_size);
+ }
+}
+
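
[Editor's note] init_bdb_block() allocates max(min_size, block_size) + 3 zero-filled bytes before copying, so version-gated parsers can read fields that an old, short VBT never wrote and see zeros rather than stale heap contents. A toy model of the padding (sizes are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* 'raw' is the firmware block (3-byte header + payload), 'min_size'
     * the size the parsers may touch. */
    static unsigned char *cache_block(const unsigned char *raw, size_t payload,
                                      size_t min_size)
    {
            size_t alloc = (min_size > payload ? min_size : payload) + 3;
            unsigned char *copy = calloc(1, alloc); /* zero fill = safe defaults */

            if (copy)
                    memcpy(copy, raw, payload + 3); /* header + real payload */
            return copy;
    }

    int main(void)
    {
            const unsigned char raw[23] = { 40, 20, 0 }; /* id=40, size=20 */
            unsigned char *copy = cache_block(raw, 20, 48);

            printf("byte 30 reads as %u\n", copy ? copy[30] : 0); /* 0: padded */
            free(copy);
            return 0;
    }
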
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
const struct lvds_dvo_timing *dvo_timing)
@@ -169,82 +570,124 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
}
static const struct lvds_dvo_timing *
-get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
- const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
int index)
{
- /*
- * the size of fp_timing varies on the different platform.
- * So calculate the DVO timing relative offset in LVDS data
- * entry to get the DVO timing entry
- */
-
- int lfp_data_size =
- lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
- int dvo_timing_offset =
- lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
- char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
-
- return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+ return (const void *)data + ptrs->ptr[index].dvo_timing.offset;
}
-/* get lvds_fp_timing entry
- * this function may return NULL if the corresponding entry is invalid
- */
static const struct lvds_fp_timing *
-get_lvds_fp_timing(const struct bdb_header *bdb,
- const struct bdb_lvds_lfp_data *data,
+get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
const struct bdb_lvds_lfp_data_ptrs *ptrs,
int index)
{
- size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
- u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
- size_t ofs;
+ return (const void *)data + ptrs->ptr[index].fp_timing.offset;
+}
- if (index >= ARRAY_SIZE(ptrs->ptr))
- return NULL;
- ofs = ptrs->ptr[index].fp_timing_offset;
- if (ofs < data_ofs ||
- ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+static const struct bdb_lvds_lfp_data_tail *
+get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs)
+{
+ if (ptrs->panel_name.table_size)
+ return (const void *)data + ptrs->panel_name.offset;
+ else
return NULL;
- return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
+static int opregion_get_panel_type(struct drm_i915_private *i915)
+{
+ return intel_opregion_get_panel_type(i915);
+}
+
+static int vbt_get_panel_type(struct drm_i915_private *i915)
+{
+ const struct bdb_lvds_options *lvds_options;
+
+ lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return -1;
+
+ if (lvds_options->panel_type > 0xf) {
+ drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n",
+ lvds_options->panel_type);
+ return -1;
+ }
+
+ return lvds_options->panel_type;
+}
+
+static int fallback_get_panel_type(struct drm_i915_private *i915)
+{
+ return 0;
+}
+
+enum panel_type {
+ PANEL_TYPE_OPREGION,
+ PANEL_TYPE_VBT,
+ PANEL_TYPE_FALLBACK,
+};
+
+static int get_panel_type(struct drm_i915_private *i915)
+{
+ struct {
+ const char *name;
+ int (*get_panel_type)(struct drm_i915_private *i915);
+ int panel_type;
+ } panel_types[] = {
+ [PANEL_TYPE_OPREGION] = {
+ .name = "OpRegion",
+ .get_panel_type = opregion_get_panel_type,
+ },
+ [PANEL_TYPE_VBT] = {
+ .name = "VBT",
+ .get_panel_type = vbt_get_panel_type,
+ },
+ [PANEL_TYPE_FALLBACK] = {
+ .name = "fallback",
+ .get_panel_type = fallback_get_panel_type,
+ },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
+ panel_types[i].panel_type = panel_types[i].get_panel_type(i915);
+
+ drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf);
+
+ if (panel_types[i].panel_type >= 0)
+ drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n",
+ panel_types[i].name, panel_types[i].panel_type);
+ }
+
+ if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0)
+ i = PANEL_TYPE_OPREGION;
+ else if (panel_types[PANEL_TYPE_VBT].panel_type >= 0)
+ i = PANEL_TYPE_VBT;
+ else
+ i = PANEL_TYPE_FALLBACK;
+
+ drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n",
+ panel_types[i].name, panel_types[i].panel_type);
+
+ return panel_types[i].panel_type;
}
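
[Editor's note] get_panel_type() replaces the old nested if/else with a table of panel-type sources consulted in priority order: OpRegion, then VBT, then a fallback of 0. A simplified standalone sketch of the pattern (it short-circuits at the first hit, whereas the driver evaluates every source so each result can be logged):

    #include <stdio.h>

    /* Stand-in sources; a negative return means "not available". */
    static int from_opregion(void) { return -1; }
    static int from_vbt(void)      { return 2;  }
    static int fallback(void)      { return 0;  }

    int main(void)
    {
            int (*const sources[])(void) = { from_opregion, from_vbt, fallback };
            size_t i;

            for (i = 0; i < sizeof(sources) / sizeof(sources[0]); i++) {
                    int type = sources[i]();

                    if (type >= 0) {
                            printf("panel type %d from source %zu\n", type, i);
                            return 0;
                    }
            }
            return 1;
    }
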
/* Parse general panel options */
static void
-parse_panel_options(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_panel_options(struct drm_i915_private *i915)
{
const struct bdb_lvds_options *lvds_options;
int panel_type;
int drrs_mode;
- int ret;
- lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
i915->vbt.lvds_dither = lvds_options->pixel_dither;
- ret = intel_opregion_get_panel_type(i915);
- if (ret >= 0) {
- drm_WARN_ON(&i915->drm, ret > 0xf);
- panel_type = ret;
- drm_dbg_kms(&i915->drm, "Panel type: %d (OpRegion)\n",
- panel_type);
- } else {
- if (lvds_options->panel_type > 0xf) {
- drm_dbg_kms(&i915->drm,
- "Invalid VBT panel type 0x%x\n",
- lvds_options->panel_type);
- return;
- }
- panel_type = lvds_options->panel_type;
- drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n",
- panel_type);
- }
+ panel_type = get_panel_type(i915);
i915->vbt.panel_type = panel_type;
@@ -257,42 +700,32 @@ parse_panel_options(struct drm_i915_private *i915,
*/
switch (drrs_mode) {
case 0:
- i915->vbt.drrs_type = STATIC_DRRS_SUPPORT;
+ i915->vbt.drrs_type = DRRS_TYPE_STATIC;
drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
break;
case 2:
- i915->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
+ i915->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
drm_dbg_kms(&i915->drm,
"DRRS supported mode is seamless\n");
break;
default:
- i915->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ i915->vbt.drrs_type = DRRS_TYPE_NONE;
drm_dbg_kms(&i915->drm,
"DRRS not supported (VBT input)\n");
break;
}
}
-/* Try to find integrated panel timing data */
static void
parse_lfp_panel_dtd(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+ const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs)
{
- const struct bdb_lvds_lfp_data *lvds_lfp_data;
- const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int panel_type = i915->vbt.panel_type;
- lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
- if (!lvds_lfp_data)
- return;
-
- lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
- if (!lvds_lfp_data_ptrs)
- return;
-
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
panel_type);
@@ -306,34 +739,75 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
drm_dbg_kms(&i915->drm,
- "Found panel mode in BIOS VBT legacy lfp table:\n");
- drm_mode_debug_printmodeline(panel_fixed_mode);
+ "Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(panel_fixed_mode));
- fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+ fp_timing = get_lvds_fp_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
panel_type);
- if (fp_timing) {
- /* check the resolution, just to be sure */
- if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
- fp_timing->y_res == panel_fixed_mode->vdisplay) {
- i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
- drm_dbg_kms(&i915->drm,
- "VBT initial LVDS value %x\n",
- i915->vbt.bios_lvds_val);
- }
+
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+ drm_dbg_kms(&i915->drm,
+ "VBT initial LVDS value %x\n",
+ i915->vbt.bios_lvds_val);
+ }
+}
+
+static void
+parse_lfp_data(struct drm_i915_private *i915)
+{
+ const struct bdb_lvds_lfp_data *data;
+ const struct bdb_lvds_lfp_data_tail *tail;
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ int panel_type = i915->vbt.panel_type;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return;
+
+ data = find_section(i915, BDB_LVDS_LFP_DATA);
+ if (!data)
+ return;
+
+ if (!i915->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(i915, data, ptrs);
+
+ tail = get_lfp_data_tail(data, ptrs);
+ if (!tail)
+ return;
+
+ if (i915->vbt.version >= 188) {
+ i915->vbt.seamless_drrs_min_refresh_rate =
+ tail->seamless_drrs_min_refresh_rate[panel_type];
+ drm_dbg_kms(&i915->drm,
+ "Seamless DRRS min refresh rate: %d Hz\n",
+ i915->vbt.seamless_drrs_min_refresh_rate);
}
}
static void
-parse_generic_dtd(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_generic_dtd(struct drm_i915_private *i915)
{
const struct bdb_generic_dtd *generic_dtd;
const struct generic_dtd_entry *dtd;
struct drm_display_mode *panel_fixed_mode;
int num_dtd;
- generic_dtd = find_section(bdb, BDB_GENERIC_DTD);
+ /*
+ * Older VBTs provided DTD information for internal displays through
+ * the "LFP panel tables" block (42). As of VBT revision 229 the
+ * DTD information should be provided via a newer "generic DTD"
+ * block (58). Just to be safe, we'll try the new generic DTD block
+ * first on VBT >= 229, but still fall back to trying the old LFP
+ * block if that fails.
+ */
+ if (i915->vbt.version < 229)
+ return;
+
+ generic_dtd = find_section(i915, BDB_GENERIC_DTD);
if (!generic_dtd)
return;
@@ -397,40 +871,21 @@ parse_generic_dtd(struct drm_i915_private *i915,
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
drm_dbg_kms(&i915->drm,
- "Found panel mode in BIOS VBT generic dtd table:\n");
- drm_mode_debug_printmodeline(panel_fixed_mode);
+ "Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(panel_fixed_mode));
i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
}
static void
-parse_panel_dtd(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
-{
- /*
- * Older VBTs provided provided DTD information for internal displays
- * through the "LFP panel DTD" block (42). As of VBT revision 229,
- * that block is now deprecated and DTD information should be provided
- * via a newer "generic DTD" block (58). Just to be safe, we'll
- * try the new generic DTD block first on VBT >= 229, but still fall
- * back to trying the old LFP block if that fails.
- */
- if (bdb->version >= 229)
- parse_generic_dtd(i915, bdb);
- if (!i915->vbt.lfp_lvds_vbt_mode)
- parse_lfp_panel_dtd(i915, bdb);
-}
-
-static void
-parse_lfp_backlight(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_lfp_backlight(struct drm_i915_private *i915)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct lfp_backlight_data_entry *entry;
int panel_type = i915->vbt.panel_type;
u16 level;
- backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
+ backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
@@ -452,12 +907,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
- if (bdb->version >= 191) {
+ if (i915->vbt.version >= 191) {
size_t exp_size;
- if (bdb->version >= 236)
+ if (i915->vbt.version >= 236)
exp_size = sizeof(struct bdb_lfp_backlight_data);
- else if (bdb->version >= 234)
+ else if (i915->vbt.version >= 234)
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
else
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
@@ -474,14 +929,14 @@ parse_lfp_backlight(struct drm_i915_private *i915,
i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
i915->vbt.backlight.active_low_pwm = entry->active_low_pwm;
- if (bdb->version >= 234) {
+ if (i915->vbt.version >= 234) {
u16 min_level;
bool scale;
level = backlight_data->brightness_level[panel_type].level;
min_level = backlight_data->brightness_min_level[panel_type].level;
- if (bdb->version >= 236)
+ if (i915->vbt.version >= 236)
scale = backlight_data->brightness_precision_bits[panel_type] == 16;
else
scale = level > 255;
@@ -514,8 +969,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
/* Try to find sdvo panel data */
static void
-parse_sdvo_panel_data(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_sdvo_panel_data(struct drm_i915_private *i915)
{
const struct bdb_sdvo_panel_dtds *dtds;
struct drm_display_mode *panel_fixed_mode;
@@ -531,14 +985,14 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
if (index == -1) {
const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
- sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ sdvo_lvds_options = find_section(i915, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
- dtds = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ dtds = find_section(i915, BDB_SDVO_PANEL_DTDS);
if (!dtds)
return;
@@ -551,8 +1005,8 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
i915->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
drm_dbg_kms(&i915->drm,
- "Found SDVO panel mode in BIOS VBT tables:\n");
- drm_mode_debug_printmodeline(panel_fixed_mode);
+ "Found SDVO panel mode in BIOS VBT tables: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(panel_fixed_mode));
}
static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
@@ -570,18 +1024,17 @@ static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
}
static void
-parse_general_features(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_general_features(struct drm_i915_private *i915)
{
const struct bdb_general_features *general;
- general = find_section(bdb, BDB_GENERAL_FEATURES);
+ general = find_section(i915, BDB_GENERAL_FEATURES);
if (!general)
return;
i915->vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
- if (bdb->version >= 155 &&
+ if (i915->vbt.version >= 155 &&
(HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
i915->vbt.int_crt_support = general->int_crt_support;
i915->vbt.lvds_use_ssc = general->enable_ssc;
@@ -589,7 +1042,7 @@ parse_general_features(struct drm_i915_private *i915,
intel_bios_ssc_frequency(i915, general->ssc_freq);
i915->vbt.display_clock_mode = general->display_clock_mode;
i915->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
- if (bdb->version >= 181) {
+ if (i915->vbt.version >= 181) {
i915->vbt.orientation = general->rotate_180 ?
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
DRM_MODE_PANEL_ORIENTATION_NORMAL;
@@ -597,7 +1050,7 @@ parse_general_features(struct drm_i915_private *i915,
i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- if (bdb->version >= 249 && general->afc_startup_config) {
+ if (i915->vbt.version >= 249 && general->afc_startup_config) {
i915->vbt.override_afc_startup = true;
i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
}
@@ -695,12 +1148,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
}
static void
-parse_driver_features(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_driver_features(struct drm_i915_private *i915)
{
const struct bdb_driver_features *driver;
- driver = find_section(bdb, BDB_DRIVER_FEATURES);
+ driver = find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@@ -724,13 +1176,13 @@ parse_driver_features(struct drm_i915_private *i915,
* in the wild with the bits correctly populated. Version
* 108 (on i85x) does not have the bits correctly populated.
*/
- if (bdb->version >= 134 &&
+ if (i915->vbt.version >= 134 &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
i915->vbt.int_lvds_support = 0;
}
- if (bdb->version < 228) {
+ if (i915->vbt.version < 228) {
drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
driver->drrs_enabled);
/*
@@ -740,23 +1192,22 @@ parse_driver_features(struct drm_i915_private *i915,
* driver->drrs_enabled=false
*/
if (!driver->drrs_enabled)
- i915->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ i915->vbt.drrs_type = DRRS_TYPE_NONE;
i915->vbt.psr.enable = driver->psr_enabled;
}
}
static void
-parse_power_conservation_features(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_power_conservation_features(struct drm_i915_private *i915)
{
const struct bdb_lfp_power *power;
u8 panel_type = i915->vbt.panel_type;
- if (bdb->version < 228)
+ if (i915->vbt.version < 228)
return;
- power = find_section(bdb, BDB_LFP_POWER);
+ power = find_section(i915, BDB_LFP_POWER);
if (!power)
return;
@@ -769,21 +1220,21 @@ parse_power_conservation_features(struct drm_i915_private *i915,
* power->drrs & BIT(panel_type)=false
*/
if (!(power->drrs & BIT(panel_type)))
- i915->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ i915->vbt.drrs_type = DRRS_TYPE_NONE;
- if (bdb->version >= 232)
+ if (i915->vbt.version >= 232)
i915->vbt.edp.hobl = power->hobl & BIT(panel_type);
}
static void
-parse_edp(struct drm_i915_private *i915, const struct bdb_header *bdb)
+parse_edp(struct drm_i915_private *i915)
{
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_fast_link_params *edp_link_params;
int panel_type = i915->vbt.panel_type;
- edp = find_section(bdb, BDB_EDP);
+ edp = find_section(i915, BDB_EDP);
if (!edp)
return;
@@ -876,7 +1327,7 @@ parse_edp(struct drm_i915_private *i915, const struct bdb_header *bdb)
break;
}
- if (bdb->version >= 173) {
+ if (i915->vbt.version >= 173) {
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
@@ -888,16 +1339,19 @@ parse_edp(struct drm_i915_private *i915, const struct bdb_header *bdb)
i915->vbt.edp.low_vswing = vswing == 0;
}
}
+
+ i915->vbt.edp.drrs_msa_timing_delay =
+ (edp->sdrrs_msa_timing_delay >> (panel_type * 2)) & 3;
}
static void
-parse_psr(struct drm_i915_private *i915, const struct bdb_header *bdb)
+parse_psr(struct drm_i915_private *i915)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
int panel_type = i915->vbt.panel_type;
- psr = find_section(bdb, BDB_PSR);
+ psr = find_section(i915, BDB_PSR);
if (!psr) {
drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
return;
@@ -916,7 +1370,7 @@ parse_psr(struct drm_i915_private *i915, const struct bdb_header *bdb)
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
* Old decimal value is wake up time in multiples of 100 us.
*/
- if (bdb->version >= 205 &&
+ if (i915->vbt.version >= 205 &&
(DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
@@ -962,7 +1416,7 @@ parse_psr(struct drm_i915_private *i915, const struct bdb_header *bdb)
i915->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
- if (bdb->version >= 226) {
+ if (i915->vbt.version >= 226) {
u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
@@ -1031,8 +1485,7 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
}
static void
-parse_mipi_config(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_mipi_config(struct drm_i915_private *i915)
{
const struct bdb_mipi_config *start;
const struct mipi_config *config;
@@ -1055,7 +1508,7 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Parse #52 for panel index used from panel_type already
* parsed
*/
- start = find_section(bdb, BDB_MIPI_CONFIG);
+ start = find_section(i915, BDB_MIPI_CONFIG);
if (!start) {
drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
return;
@@ -1082,7 +1535,7 @@ parse_mipi_config(struct drm_i915_private *i915,
return;
}
- parse_dsi_backlight_ports(i915, bdb->version, port);
+ parse_dsi_backlight_ports(i915, i915->vbt.version, port);
/* FIXME is the 90 vs. 270 correct? */
switch (config->rotation) {
@@ -1351,8 +1804,7 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
}
static void
-parse_mipi_sequence(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_mipi_sequence(struct drm_i915_private *i915)
{
int panel_type = i915->vbt.panel_type;
const struct bdb_mipi_sequence *sequence;
@@ -1365,7 +1817,7 @@ parse_mipi_sequence(struct drm_i915_private *i915,
if (i915->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
- sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+ sequence = find_section(i915, BDB_MIPI_SEQUENCE);
if (!sequence) {
drm_dbg_kms(&i915->drm,
"No MIPI Sequence found, parsing complete\n");
@@ -1436,8 +1888,7 @@ err:
}
static void
-parse_compression_parameters(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_compression_parameters(struct drm_i915_private *i915)
{
const struct bdb_compression_parameters *params;
struct intel_bios_encoder_data *devdata;
@@ -1445,10 +1896,10 @@ parse_compression_parameters(struct drm_i915_private *i915,
u16 block_size;
int index;
- if (bdb->version < 198)
+ if (i915->vbt.version < 198)
return;
- params = find_section(bdb, BDB_COMPRESSION_PARAMETERS);
+ params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
@@ -1955,6 +2406,12 @@ static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devd
fallthrough;
case HDMI_MAX_DATA_RATE_PLATFORM:
return 0;
+ case HDMI_MAX_DATA_RATE_594:
+ return 594000;
+ case HDMI_MAX_DATA_RATE_340:
+ return 340000;
+ case HDMI_MAX_DATA_RATE_300:
+ return 300000;
case HDMI_MAX_DATA_RATE_297:
return 297000;
case HDMI_MAX_DATA_RATE_165:
@@ -2077,8 +2534,7 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
}
static void
-parse_general_definitions(struct drm_i915_private *i915,
- const struct bdb_header *bdb)
+parse_general_definitions(struct drm_i915_private *i915)
{
const struct bdb_general_definitions *defs;
struct intel_bios_encoder_data *devdata;
@@ -2088,7 +2544,7 @@ parse_general_definitions(struct drm_i915_private *i915,
u16 block_size;
int bus_pin;
- defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ defs = find_section(i915, BDB_GENERAL_DEFINITIONS);
if (!defs) {
drm_dbg_kms(&i915->drm,
"No general definition block is found, no devices defined.\n");
@@ -2108,31 +2564,31 @@ parse_general_definitions(struct drm_i915_private *i915,
if (intel_gmbus_is_valid_pin(i915, bus_pin))
i915->vbt.crt_ddc_pin = bus_pin;
- if (bdb->version < 106) {
+ if (i915->vbt.version < 106) {
expected_size = 22;
- } else if (bdb->version < 111) {
+ } else if (i915->vbt.version < 111) {
expected_size = 27;
- } else if (bdb->version < 195) {
+ } else if (i915->vbt.version < 195) {
expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
- } else if (bdb->version == 195) {
+ } else if (i915->vbt.version == 195) {
expected_size = 37;
- } else if (bdb->version <= 215) {
+ } else if (i915->vbt.version <= 215) {
expected_size = 38;
- } else if (bdb->version <= 237) {
+ } else if (i915->vbt.version <= 237) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
BUILD_BUG_ON(sizeof(*child) < 39);
drm_dbg(&i915->drm,
"Expected child device config size for VBT version %u not known; assuming %u\n",
- bdb->version, expected_size);
+ i915->vbt.version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (defs->child_dev_size != expected_size)
drm_err(&i915->drm,
"Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, bdb->version);
+ defs->child_dev_size, expected_size, i915->vbt.version);
/* The legacy sized child device config is the minimum we need. */
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
@@ -2457,6 +2913,7 @@ void intel_bios_init(struct drm_i915_private *i915)
const struct bdb_header *bdb;
INIT_LIST_HEAD(&i915->vbt.display_devices);
+ INIT_LIST_HEAD(&i915->vbt.bdb_blocks);
if (!HAS_DISPLAY(i915)) {
drm_dbg_kms(&i915->drm,
@@ -2488,24 +2945,27 @@ void intel_bios_init(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm,
"VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, bdb->version);
+ (int)sizeof(vbt->signature), vbt->signature, i915->vbt.version);
+
+ init_bdb_blocks(i915, bdb);
/* Grab useful general definitions */
- parse_general_features(i915, bdb);
- parse_general_definitions(i915, bdb);
- parse_panel_options(i915, bdb);
- parse_panel_dtd(i915, bdb);
- parse_lfp_backlight(i915, bdb);
- parse_sdvo_panel_data(i915, bdb);
- parse_driver_features(i915, bdb);
- parse_power_conservation_features(i915, bdb);
- parse_edp(i915, bdb);
- parse_psr(i915, bdb);
- parse_mipi_config(i915, bdb);
- parse_mipi_sequence(i915, bdb);
+ parse_general_features(i915);
+ parse_general_definitions(i915);
+ parse_panel_options(i915);
+ parse_generic_dtd(i915);
+ parse_lfp_data(i915);
+ parse_lfp_backlight(i915);
+ parse_sdvo_panel_data(i915);
+ parse_driver_features(i915);
+ parse_power_conservation_features(i915);
+ parse_edp(i915);
+ parse_psr(i915);
+ parse_mipi_config(i915);
+ parse_mipi_sequence(i915);
/* Depends on child device list */
- parse_compression_parameters(i915, bdb);
+ parse_compression_parameters(i915);
out:
if (!vbt) {
@@ -2527,14 +2987,20 @@ out:
*/
void intel_bios_driver_remove(struct drm_i915_private *i915)
{
- struct intel_bios_encoder_data *devdata, *n;
+ struct intel_bios_encoder_data *devdata, *nd;
+ struct bdb_block_entry *entry, *ne;
- list_for_each_entry_safe(devdata, n, &i915->vbt.display_devices, node) {
+ list_for_each_entry_safe(devdata, nd, &i915->vbt.display_devices, node) {
list_del(&devdata->node);
kfree(devdata->dsc);
kfree(devdata);
}
+ list_for_each_entry_safe(entry, ne, &i915->vbt.bdb_blocks, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+
kfree(i915->vbt.sdvo_lvds_vbt_mode);
i915->vbt.sdvo_lvds_vbt_mode = NULL;
kfree(i915->vbt.lfp_lvds_vbt_mode);
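
[Editor's note] The teardown below uses list_for_each_entry_safe() because each entry is freed inside the loop body; the extra cursors ('nd', 'ne') snapshot the next node before kfree(), so iteration never dereferences freed memory:

    /* With the plain iterator this would be a use-after-free:
     *
     *     list_for_each_entry(entry, &i915->vbt.bdb_blocks, node)
     *             kfree(entry);   // entry->node.next read on the next pass!
     *
     * The _safe variant fetches the next pointer into 'ne' up front,
     * so freeing 'entry' mid-loop is fine. */
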
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index adf58c58513b..37bd7b17f3d0 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_state_helper.h>
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
@@ -124,8 +125,8 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
- ICL_PCODE_POINTS_RESTRICTED_MASK,
- ICL_PCODE_POINTS_RESTRICTED,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
1);
if (ret < 0) {
@@ -464,20 +465,25 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
static void dg2_get_bw_info(struct drm_i915_private *i915)
{
- struct intel_bw_info *bi = &i915->max_bw[0];
+ unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(i915->max_bw);
+ int i;
/*
* DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
- * that doesn't depend on the number of planes enabled. Create a
- * single dummy QGV point to reflect that. DG2-G10 platforms have a
- * constant 50 GB/s bandwidth, whereas DG2-G11 platforms have 38 GB/s.
+ * that doesn't depend on the number of planes enabled. So fill all the
+ * plane groups with constant bw information for uniformity with other
+ * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
+ * whereas DG2-G11 platforms have 38 GB/s.
*/
- bi->num_planes = 1;
- bi->num_qgv_points = 1;
- if (IS_DG2_G11(i915))
- bi->deratedbw[0] = 38000;
- else
- bi->deratedbw[0] = 50000;
+ for (i = 0; i < num_groups; i++) {
+ struct intel_bw_info *bi = &i915->max_bw[i];
+
+ bi->num_planes = 1;
+ /* Need only one dummy QGV point per group */
+ bi->num_qgv_points = 1;
+ bi->deratedbw[0] = deratedbw;
+ }
i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
}
@@ -578,6 +584,7 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -590,11 +597,26 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
continue;
data_rate += crtc_state->data_rate[plane_id];
+
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->data_rate_y[plane_id];
}
return data_rate;
}
+/* "Maximum Pipe Read Bandwidth" */
+static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) < 12)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
+}
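
[Editor's note] The "* 10 / 512" in intel_bw_crtc_min_cdclk() looks like integer math for data_rate / 51.2, i.e. (going by the "Maximum Pipe Read Bandwidth" title) a ceiling of roughly 51.2 bytes fetched per CDCLK cycle on display version 12+. A hypothetical worked example:

    /* data_rate = 2,000,000 kB/s (made-up figure):
     *   min cdclk = DIV_ROUND_UP_ULL(2,000,000 * 10, 512) = 39,063 kHz
     * mul_u32_u32() widens the multiply to 64 bits before the divide,
     * so large data rates cannot overflow. */
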
+
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
@@ -633,8 +655,8 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active(dev_priv))
- data_rate = data_rate * 105 / 100;
+ if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
}
@@ -674,6 +696,53 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static bool intel_bw_state_changed(struct drm_i915_private *i915,
+ const struct intel_bw_state *old_bw_state,
+ const struct intel_bw_state *new_bw_state)
+{
+ enum pipe pipe;
+
+ for_each_pipe(i915, pipe) {
+ const struct intel_dbuf_bw *old_crtc_bw =
+ &old_bw_state->dbuf_bw[pipe];
+ const struct intel_dbuf_bw *new_crtc_bw =
+ &new_bw_state->dbuf_bw[pipe];
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(i915, slice) {
+ if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
+ old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
+ return true;
+ }
+
+ if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
+ struct intel_crtc *crtc,
+ enum plane_id plane_id,
+ const struct skl_ddb_entry *ddb,
+ unsigned int data_rate)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
+ enum dbuf_slice slice;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
+ crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
+ crtc_bw->active_planes[slice] |= BIT(plane_id);
+ }
+}
+
static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
@@ -682,136 +751,145 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
enum plane_id plane_id;
- memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
+ memset(crtc_bw, 0, sizeof(*crtc_bw));
if (!crtc_state->hw.active)
return;
for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_ddb_entry *ddb_uv =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
- unsigned int data_rate = crtc_state->data_rate[plane_id];
- unsigned int dbuf_mask = 0;
- enum dbuf_slice slice;
-
- dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_y);
- dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_uv);
-
/*
- * FIXME: To calculate that more properly we probably
- * need to split per plane data_rate into data_rate_y
- * and data_rate_uv for multiplanar formats in order not
- * to get accounted those twice if they happen to reside
- * on different slices.
- * However for pre-icl this would work anyway because
- * we have only single slice and for icl+ uv plane has
- * non-zero data rate.
- * So in worst case those calculation are a bit
- * pessimistic, which shouldn't pose any significant
- * problem anyway.
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
*/
- for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask)
- crtc_bw->used_bw[slice] += data_rate;
- }
-}
-
-int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_bw_state *new_bw_state = NULL;
- struct intel_bw_state *old_bw_state = NULL;
- const struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int max_bw = 0;
- enum pipe pipe;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
+ if (plane_id == PLANE_CURSOR)
+ continue;
- old_bw_state = intel_atomic_get_old_bw_state(state);
+ skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb[plane_id],
+ crtc_state->data_rate[plane_id]);
- skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
+ if (DISPLAY_VER(i915) < 11)
+ skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb_y[plane_id],
+ crtc_state->data_rate[plane_id]);
}
+}
- if (!old_bw_state)
- return 0;
+/* "Maximum Data Buffer Bandwidth" */
+static int
+intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int total_max_bw = 0;
+ enum dbuf_slice slice;
- for_each_pipe(dev_priv, pipe) {
- struct intel_dbuf_bw *crtc_bw;
- enum dbuf_slice slice;
+ for_each_dbuf_slice(i915, slice) {
+ int num_active_planes = 0;
+ unsigned int max_bw = 0;
+ enum pipe pipe;
- crtc_bw = &new_bw_state->dbuf_bw[pipe];
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_pipe(i915, pipe) {
+ const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
- for_each_dbuf_slice(dev_priv, slice) {
- /*
- * Current experimental observations show that contrary
- * to BSpec we get underruns once we exceed 64 * CDCLK
- * for slices in total.
- * As a temporary measure in order not to keep CDCLK
- * bumped up all the time we calculate CDCLK according
- * to this formula for overall bw consumed by slices.
- */
- max_bw += crtc_bw->used_bw[slice];
+ max_bw = max(crtc_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(crtc_bw->active_planes[slice]);
}
+ max_bw *= num_active_planes;
+
+ total_max_bw = max(total_max_bw, max_bw);
}
- new_bw_state->min_cdclk = max_bw / 64;
+ return DIV_ROUND_UP(total_max_bw, 64);
+}
- if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
- int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+int intel_bw_min_cdclk(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ enum pipe pipe;
+ int min_cdclk;
- if (ret)
- return ret;
- }
+ min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
- return 0;
+ for_each_pipe(i915, pipe)
+ min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk);
+
+ return min_cdclk;
}
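
[Editor's note] intel_bw_dbuf_min_cdclk() encodes the 64-bytes-per-CDCLK observation from the removed comment, but per dbuf slice: since the arbiter only guarantees each plane an equal share, the slice budget is the worst single-plane bandwidth times the number of planes sharing that slice, and the worst slice sets the CDCLK floor. A hypothetical walk-through:

    /* Slice S: worst plane needs 800,000 kB/s, 3 planes active on it.
     * Equal-share budget: 800,000 * 3 = 2,400,000 kB/s.
     * At 64 bytes per CDCLK cycle:
     *   min cdclk = DIV_ROUND_UP(2,400,000, 64) = 37,500 kHz.
     * intel_bw_min_cdclk() then takes the max of this over all slices
     * and of the per-pipe read limits in bw_state->min_cdclk[]. */
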
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state = NULL;
- struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_cdclk_state *cdclk_state;
const struct intel_crtc_state *crtc_state;
+ int old_min_cdclk, new_min_cdclk;
struct intel_crtc *crtc;
- int min_cdclk = 0;
- enum pipe pipe;
int i;
+ if (DISPLAY_VER(dev_priv) < 9)
+ return 0;
+
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
+
+ new_bw_state->min_cdclk[crtc->pipe] =
+ intel_bw_crtc_min_cdclk(crtc_state);
}
if (!old_bw_state)
return 0;
- for_each_pipe(dev_priv, pipe) {
- struct intel_cdclk_state *cdclk_state;
+ if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
+ int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
- cdclk_state = intel_atomic_get_new_cdclk_state(state);
- if (!cdclk_state)
- return 0;
+ old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
+ new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
- min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
- }
+ /*
+ * No need to check against the cdclk state if
+ * the min cdclk doesn't increase.
+ *
+ * Ie. we only ever increase the cdclk due to bandwidth
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_min_cdclk <= old_min_cdclk)
+ return 0;
- new_bw_state->min_cdclk = min_cdclk;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
- int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ /*
+ * No need to recalculate the cdclk state if
+ * the min cdclk doesn't increase.
+ *
+ * Ie. we only ever increase the cdclk due to bandwidth
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
+ return 0;
- if (ret)
- return ret;
- }
+ drm_dbg_kms(&dev_priv->drm,
+ "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
+ new_min_cdclk, cdclk_state->bw_min_cdclk);
+ *need_cdclk_calc = true;
return 0;
}
@@ -820,7 +898,7 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
{
unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points;
unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points;
- u16 mask = 0;
+ u16 qgv_points = 0, psf_points = 0;
/*
* We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
@@ -828,12 +906,12 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
* So need to operate only with those returned from PCode.
*/
if (num_qgv_points > 0)
- mask |= REG_GENMASK(num_qgv_points - 1, 0);
+ qgv_points = GENMASK(num_qgv_points - 1, 0);
if (num_psf_gv_points > 0)
- mask |= REG_GENMASK(num_psf_gv_points - 1, 0) << ADLS_PSF_PT_SHIFT;
+ psf_points = GENMASK(num_psf_gv_points - 1, 0);
- return mask;
+ return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
@@ -890,7 +968,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int data_rate;
unsigned int num_active_planes;
int i, ret;
- u32 allowed_points = 0;
+ u16 qgv_points = 0, psf_points = 0;
unsigned int max_bw_point = 0, max_bw = 0;
unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
@@ -948,7 +1026,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
max_bw = max_data_rate;
}
if (max_data_rate >= data_rate)
- allowed_points |= REG_FIELD_PREP(ADLS_QGV_PT_MASK, BIT(i));
+ qgv_points |= BIT(i);
drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
@@ -958,7 +1036,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int max_data_rate = adl_psf_bw(dev_priv, i);
if (max_data_rate >= data_rate)
- allowed_points |= REG_FIELD_PREP(ADLS_PSF_PT_MASK, BIT(i));
+ psf_points |= BIT(i);
drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
" required %d\n",
@@ -970,20 +1048,18 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
* left, so if we couldn't - simply reject the configuration for obvious
* reasons.
*/
- if ((allowed_points & ADLS_QGV_PT_MASK) == 0) {
+ if (qgv_points == 0) {
drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
- if (num_psf_gv_points > 0) {
- if ((allowed_points & ADLS_PSF_PT_MASK) == 0) {
- drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
- " bandwidth %d for display configuration(%d active planes).\n",
- data_rate, num_active_planes);
- return -EINVAL;
- }
+ if (num_psf_gv_points > 0 && psf_points == 0) {
+ drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
+ " bandwidth %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
+ return -EINVAL;
}
/*
@@ -992,16 +1068,18 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
* cause.
*/
if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
- allowed_points &= ADLS_PSF_PT_MASK;
- allowed_points |= BIT(max_bw_point);
+ qgv_points = BIT(max_bw_point);
drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
max_bw_point);
}
+
/*
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = ~allowed_points &
+ new_bw_state->qgv_points_mask =
+ ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) &
icl_qgv_points_mask(dev_priv);
/*
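
[Editor's note] icl_qgv_points_mask() now keeps the QGV and PSF restriction bits in separate masks and lets ICL_PCODE_REQ_QGV_PT()/ADLS_PCODE_REQ_PSF_PT() place each group in its own field of the PCode request word (the exact bit positions are those macros' business). A worked bit layout, assuming 3 QGV and 2 PSF points:

    /* num_qgv_points = 3    -> qgv_points = GENMASK(2, 0) = 0b111
     * num_psf_gv_points = 2 -> psf_points = GENMASK(1, 0) = 0b011
     * mask = ICL_PCODE_REQ_QGV_PT(0b111) | ADLS_PCODE_REQ_PSF_PT(0b011)
     * The same helpers build both this full mask and the "points to keep
     * masked" request computed in intel_bw_atomic_check(). */
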
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 0ceaed1c9656..cb7ee3a24a58 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -17,7 +17,8 @@ struct intel_atomic_state;
struct intel_crtc_state;
struct intel_dbuf_bw {
- int used_bw[I915_MAX_DBUF_SLICES];
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
};
struct intel_bw_state {
@@ -40,10 +41,9 @@ struct intel_bw_state {
*/
u16 qgv_points_mask;
+ int min_cdclk[I915_MAX_PIPES];
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
-
- int min_cdclk;
};
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
@@ -64,7 +64,9 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask);
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state);
-int skl_bw_calc_min_cdclk(struct intel_atomic_state *state);
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc);
+int intel_bw_min_cdclk(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state);
#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 8888fda8b701..b2017d8161b4 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -72,7 +72,6 @@ struct intel_cdclk_funcs {
void (*set_cdclk)(struct drm_i915_private *i915,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe);
- int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
u8 (*calc_voltage_level)(int cdclk);
};
@@ -83,12 +82,6 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
}
-static int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state);
-}
-
static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
@@ -2325,13 +2318,6 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
dev_priv->max_cdclk_freq));
}
- if (min_cdclk > dev_priv->max_cdclk_freq) {
- drm_dbg_kms(&dev_priv->drm,
- "required cdclk (%d kHz) exceeds max (%d kHz)\n",
- min_cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
-
return min_cdclk;
}
@@ -2339,7 +2325,7 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
{
struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_bw_state *bw_state = NULL;
+ const struct intel_bw_state *bw_state;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
@@ -2352,10 +2338,6 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
if (min_cdclk < 0)
return min_cdclk;
- bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(bw_state))
- return PTR_ERR(bw_state);
-
if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
continue;
@@ -2366,14 +2348,31 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
return ret;
}
- min_cdclk = cdclk_state->force_min_cdclk;
- for_each_pipe(dev_priv, pipe) {
- min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ bw_state = intel_atomic_get_new_bw_state(state);
+ if (bw_state) {
+ min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state);
- if (!bw_state)
- continue;
+ if (cdclk_state->bw_min_cdclk != min_cdclk) {
+ int ret;
+
+ cdclk_state->bw_min_cdclk = min_cdclk;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+ }
+ }
+
+ min_cdclk = max(cdclk_state->force_min_cdclk,
+ cdclk_state->bw_min_cdclk);
+ for_each_pipe(dev_priv, pipe)
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
- min_cdclk = max(bw_state->min_cdclk, min_cdclk);
+ if (min_cdclk > dev_priv->max_cdclk_freq) {
+ drm_dbg_kms(&dev_priv->drm,
+ "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+ min_cdclk, dev_priv->max_cdclk_freq);
+ return -EINVAL;
}
return min_cdclk;
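/*
 * Standalone toy version of the aggregation above: the final cdclk
 * floor is simply a running max over the forced floor, the bandwidth
 * floor and the per-pipe minimums, clamped against the platform
 * maximum. All numbers are made up (kHz).
 */
#include <stdio.h>

int main(void)
{
	int force_min_cdclk = 0;
	int bw_min_cdclk = 324000;
	int pipe_min[] = { 307200, 326400, 172800 };
	int max_cdclk_freq = 652800;
	int min_cdclk, i;

	min_cdclk = force_min_cdclk > bw_min_cdclk ?
		force_min_cdclk : bw_min_cdclk;
	for (i = 0; i < 3; i++)
		if (pipe_min[i] > min_cdclk)
			min_cdclk = pipe_min[i];

	if (min_cdclk > max_cdclk_freq)
		return 1; /* the kernel path returns -EINVAL instead */

	printf("required cdclk: %d kHz\n", min_cdclk); /* 326400 */
	return 0;
}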
@@ -2654,14 +2653,10 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
int intel_cdclk_atomic_check(struct intel_atomic_state *state,
bool *need_cdclk_calc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_cdclk_state *old_cdclk_state;
const struct intel_cdclk_state *new_cdclk_state;
struct intel_plane_state *plane_state;
- struct intel_bw_state *new_bw_state;
struct intel_plane *plane;
- int min_cdclk = 0;
- enum pipe pipe;
int ret;
int i;
@@ -2676,6 +2671,10 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
return ret;
}
+ ret = intel_bw_calc_min_cdclk(state, need_cdclk_calc);
+ if (ret)
+ return ret;
+
old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
@@ -2683,23 +2682,6 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
*need_cdclk_calc = true;
- ret = intel_cdclk_bw_calc_min_cdclk(state);
- if (ret)
- return ret;
-
- new_bw_state = intel_atomic_get_new_bw_state(state);
-
- if (!new_cdclk_state || !new_bw_state)
- return 0;
-
- for_each_pipe(i915, pipe) {
- min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
-
- /* Currently do this change only if we need to increase */
- if (new_bw_state->min_cdclk > min_cdclk)
- *need_cdclk_calc = true;
- }
-
return 0;
}
@@ -3072,7 +3054,6 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
- .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
.calc_voltage_level = tgl_calc_voltage_level,
};
@@ -3080,7 +3061,6 @@ static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
static const struct intel_cdclk_funcs ehl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
- .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
.calc_voltage_level = ehl_calc_voltage_level,
};
@@ -3088,7 +3068,6 @@ static const struct intel_cdclk_funcs ehl_cdclk_funcs = {
static const struct intel_cdclk_funcs icl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
- .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
.calc_voltage_level = icl_calc_voltage_level,
};
@@ -3096,7 +3075,6 @@ static const struct intel_cdclk_funcs icl_cdclk_funcs = {
static const struct intel_cdclk_funcs bxt_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
- .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
.calc_voltage_level = bxt_calc_voltage_level,
};
@@ -3104,53 +3082,45 @@ static const struct intel_cdclk_funcs bxt_cdclk_funcs = {
static const struct intel_cdclk_funcs skl_cdclk_funcs = {
.get_cdclk = skl_get_cdclk,
.set_cdclk = skl_set_cdclk,
- .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
.modeset_calc_cdclk = skl_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs bdw_cdclk_funcs = {
.get_cdclk = bdw_get_cdclk,
.set_cdclk = bdw_set_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = bdw_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs chv_cdclk_funcs = {
.get_cdclk = vlv_get_cdclk,
.set_cdclk = chv_set_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = vlv_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs vlv_cdclk_funcs = {
.get_cdclk = vlv_get_cdclk,
.set_cdclk = vlv_set_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = vlv_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs hsw_cdclk_funcs = {
.get_cdclk = hsw_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
/* SNB, IVB, 965G, 945G */
static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
.get_cdclk = fixed_400mhz_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs ilk_cdclk_funcs = {
.get_cdclk = fixed_450mhz_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs gm45_cdclk_funcs = {
.get_cdclk = gm45_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
@@ -3158,7 +3128,6 @@ static const struct intel_cdclk_funcs gm45_cdclk_funcs = {
static const struct intel_cdclk_funcs i965gm_cdclk_funcs = {
.get_cdclk = i965gm_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
@@ -3166,19 +3135,16 @@ static const struct intel_cdclk_funcs i965gm_cdclk_funcs = {
static const struct intel_cdclk_funcs pnv_cdclk_funcs = {
.get_cdclk = pnv_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs g33_cdclk_funcs = {
.get_cdclk = g33_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs i945gm_cdclk_funcs = {
.get_cdclk = i945gm_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
@@ -3186,37 +3152,31 @@ static const struct intel_cdclk_funcs i945gm_cdclk_funcs = {
static const struct intel_cdclk_funcs i915gm_cdclk_funcs = {
.get_cdclk = i915gm_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs i915g_cdclk_funcs = {
.get_cdclk = fixed_333mhz_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs i865g_cdclk_funcs = {
.get_cdclk = fixed_266mhz_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs i85x_cdclk_funcs = {
.get_cdclk = i85x_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs i845g_cdclk_funcs = {
.get_cdclk = fixed_200mhz_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
static const struct intel_cdclk_funcs i830_cdclk_funcs = {
.get_cdclk = fixed_133mhz_get_cdclk,
- .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index df66f66fbad0..b535cf6a7d9e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -36,6 +36,8 @@ struct intel_cdclk_state {
*/
struct intel_cdclk_config actual;
+ /* minimum acceptable cdclk to satisfy bandwidth requirements */
+ int bw_min_cdclk;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index e94ec57260f1..34128c9c635c 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -31,12 +31,21 @@
struct intel_color_funcs {
int (*color_check)(struct intel_crtc_state *crtc_state);
/*
- * Program double buffered color management registers during
- * vblank evasion. The registers should then latch during the
- * next vblank start, alongside any other double buffered registers
- * involved with the same commit.
+ * Program non-arming double buffered color management registers
+ * before vblank evasion. The registers should then latch after
+ * the arming register is written (by color_commit_arm()) during
+ * the next vblank start, alongside any other double buffered
+ * registers involved with the same commit. This hook is optional.
+ */
+ void (*color_commit_noarm)(const struct intel_crtc_state *crtc_state);
+ /*
+ * Program arming double buffered color management registers
+ * during vblank evasion. The registers (and whatever other registers
+ * they arm, i.e. those written by color_commit_noarm()) should then latch
+ * during the next vblank start, alongside any other double buffered
+ * registers involved with the same commit.
*/
- void (*color_commit)(const struct intel_crtc_state *crtc_state);
+ void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
/*
* Load LUTs (and other single buffered color management
* registers). Will (hopefully) be called during the vblank
@@ -337,15 +346,11 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_coeff_identity,
ilk_csc_off_zero);
}
-
- intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
- crtc_state->csc_mode);
}
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (crtc_state->hw.ctm) {
u16 coeff[9];
@@ -364,9 +369,6 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_coeff_limited_range,
ilk_csc_postoff_limited_range);
}
-
- intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
- crtc_state->csc_mode);
}
static void chv_load_cgm_csc(struct intel_crtc *crtc,
@@ -491,7 +493,17 @@ static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw
REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw);
}
-static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
+static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ icl_load_csc_matrix(crtc_state);
+}
+
+static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ ilk_load_csc_matrix(crtc_state);
+}
+
+static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -504,7 +516,7 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, PIPECONF(pipe), val);
}
-static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
+static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -516,10 +528,11 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
intel_de_write(dev_priv, PIPECONF(pipe), val);
- ilk_load_csc_matrix(crtc_state);
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(pipe),
+ crtc_state->csc_mode);
}
-static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
+static void hsw_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -527,10 +540,11 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
crtc_state->gamma_mode);
- ilk_load_csc_matrix(crtc_state);
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
-static void skl_color_commit(const struct intel_crtc_state *crtc_state)
+static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -551,10 +565,8 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
crtc_state->gamma_mode);
- if (DISPLAY_VER(dev_priv) >= 11)
- icl_load_csc_matrix(crtc_state);
- else
- ilk_load_csc_matrix(crtc_state);
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
static void i9xx_load_lut_8(struct intel_crtc *crtc,
@@ -1169,11 +1181,19 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
dev_priv->color_funcs->load_luts(crtc_state);
}
-void intel_color_commit(const struct intel_crtc_state *crtc_state)
+void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (dev_priv->color_funcs->color_commit_noarm)
+ dev_priv->color_funcs->color_commit_noarm(crtc_state);
+}
+
+void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->color_funcs->color_commit(crtc_state);
+ dev_priv->color_funcs->color_commit_arm(crtc_state);
}
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
@@ -2132,70 +2152,77 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
static const struct intel_color_funcs chv_color_funcs = {
.color_check = chv_color_check,
- .color_commit = i9xx_color_commit,
+ .color_commit_arm = i9xx_color_commit_arm,
.load_luts = chv_load_luts,
.read_luts = chv_read_luts,
};
static const struct intel_color_funcs i965_color_funcs = {
.color_check = i9xx_color_check,
- .color_commit = i9xx_color_commit,
+ .color_commit_arm = i9xx_color_commit_arm,
.load_luts = i965_load_luts,
.read_luts = i965_read_luts,
};
static const struct intel_color_funcs i9xx_color_funcs = {
.color_check = i9xx_color_check,
- .color_commit = i9xx_color_commit,
+ .color_commit_arm = i9xx_color_commit_arm,
.load_luts = i9xx_load_luts,
.read_luts = i9xx_read_luts,
};
static const struct intel_color_funcs icl_color_funcs = {
.color_check = icl_color_check,
- .color_commit = skl_color_commit,
+ .color_commit_noarm = icl_color_commit_noarm,
+ .color_commit_arm = skl_color_commit_arm,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
};
static const struct intel_color_funcs glk_color_funcs = {
.color_check = glk_color_check,
- .color_commit = skl_color_commit,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
};
static const struct intel_color_funcs skl_color_funcs = {
.color_check = ivb_color_check,
- .color_commit = skl_color_commit,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts,
.read_luts = NULL,
};
static const struct intel_color_funcs bdw_color_funcs = {
.color_check = ivb_color_check,
- .color_commit = hsw_color_commit,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = hsw_color_commit_arm,
.load_luts = bdw_load_luts,
.read_luts = NULL,
};
static const struct intel_color_funcs hsw_color_funcs = {
.color_check = ivb_color_check,
- .color_commit = hsw_color_commit,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = hsw_color_commit_arm,
.load_luts = ivb_load_luts,
.read_luts = NULL,
};
static const struct intel_color_funcs ivb_color_funcs = {
.color_check = ivb_color_check,
- .color_commit = ilk_color_commit,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = ilk_color_commit_arm,
.load_luts = ivb_load_luts,
.read_luts = NULL,
};
static const struct intel_color_funcs ilk_color_funcs = {
.color_check = ilk_color_check,
- .color_commit = ilk_color_commit,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = ilk_color_commit_arm,
.load_luts = ilk_load_luts,
.read_luts = ilk_read_luts,
};
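/*
 * Sketch of the resulting two-phase flow from a caller's point of
 * view, assuming the usual crtc update shape elsewhere in i915: the
 * heavy CSC programming now happens before vblank evasion and only
 * the cheap arming write stays inside the critical section.
 */
static void example_commit_color(struct intel_crtc_state *crtc_state)
{
	/* non-arming double buffered writes, outside the hot window */
	intel_color_commit_noarm(crtc_state);

	intel_pipe_update_start(crtc_state);

	/* arming write: everything latches at the next vblank start */
	intel_color_commit_arm(crtc_state);

	intel_pipe_update_end(crtc_state);
}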
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 173727aaa24d..fd873425e082 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -14,7 +14,8 @@ struct drm_property_blob;
void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
-void intel_color_commit(const struct intel_crtc_state *crtc_state);
+void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
+void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 4dfe77351b8b..64890f39c3cc 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -25,18 +25,29 @@ enum {
};
static const struct icl_procmon {
+ const char *name;
u32 dw1, dw9, dw10;
} icl_procmon_values[] = {
- [PROCMON_0_85V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
- [PROCMON_0_95V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
- [PROCMON_0_95V_DOT_1] =
- { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
- [PROCMON_1_05V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
- [PROCMON_1_05V_DOT_1] =
- { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+ [PROCMON_0_85V_DOT_0] = {
+ .name = "0.85V dot0 (low-voltage)",
+ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96,
+ },
+ [PROCMON_0_95V_DOT_0] = {
+ .name = "0.95V dot0",
+ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB,
+ },
+ [PROCMON_0_95V_DOT_1] = {
+ .name = "0.95V dot1",
+ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5,
+ },
+ [PROCMON_1_05V_DOT_0] = {
+ .name = "1.05V dot0",
+ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1,
+ },
+ [PROCMON_1_05V_DOT_1] = {
+ .name = "1.05V dot1",
+ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1,
+ },
};
static const struct icl_procmon *
@@ -113,6 +124,10 @@ static bool icl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
procmon = icl_get_procmon_ref_values(dev_priv, phy);
+ drm_dbg_kms(&dev_priv->drm,
+ "Combo PHY %c Voltage/Process Info : %s\n",
+ phy_name(phy), procmon->name);
+
ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy),
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index c65f95a9a1ec..1dcc268927a2 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -54,6 +54,8 @@ int intel_connector_init(struct intel_connector *connector)
__drm_atomic_helper_connector_reset(&connector->base,
&conn_state->base);
+ INIT_LIST_HEAD(&connector->panel.fixed_modes);
+
return 0;
}
@@ -100,7 +102,7 @@ void intel_connector_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
- intel_panel_fini(&intel_connector->panel);
+ intel_panel_fini(intel_connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 65827481c1b1..4442aa355f86 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -24,6 +24,7 @@
#include "intel_display_debugfs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
@@ -367,6 +368,7 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_color_init(crtc);
+ intel_crtc_drrs_init(crtc);
intel_crtc_crc_init(crtc);
cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);
@@ -485,6 +487,8 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
+ intel_psr_lock(new_crtc_state);
+
if (new_crtc_state->do_async_flip)
return;
@@ -516,7 +520,7 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
* VBL interrupts will start the PSR exit and prevent a PSR
* re-entry as well.
*/
- intel_psr_wait_for_idle(new_crtc_state);
+ intel_psr_wait_for_idle_locked(new_crtc_state);
local_irq_disable();
@@ -630,6 +634,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ intel_psr_unlock(new_crtc_state);
+
if (new_crtc_state->do_async_flip)
return;
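/*
 * The PSR lock added above brackets the whole pipe update; a sketch
 * of the pairing as seen from the commit path. Both hooks take/drop
 * the lock before their do_async_flip early return, so it stays
 * balanced on every path.
 */
static void example_pipe_update(struct intel_crtc_state *new_crtc_state)
{
	intel_pipe_update_start(new_crtc_state); /* intel_psr_lock() */

	/* ... arming register writes under vblank evasion ... */

	intel_pipe_update_end(new_crtc_state);   /* intel_psr_unlock() */
}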
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 2ade8fdd9bdd..8c80de877605 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -153,6 +153,11 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
plane_state->uapi.src = src;
plane_state->uapi.dst = dst;
+ /* final plane coordinates will be relative to the plane's pipe */
+ drm_rect_translate(&plane_state->uapi.dst,
+ -crtc_state->pipe_src.x1,
+ -crtc_state->pipe_src.y1);
+
ret = intel_cursor_check_surface(plane_state);
if (ret)
return ret;
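/*
 * Standalone toy version of the coordinate fixup above: the dst
 * rectangle is translated from crtc space into pipe space by
 * subtracting the pipe source origin. Numbers are made up for a
 * bigjoiner-style second pipe whose slice starts at x=1920.
 */
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static void translate(struct rect *r, int dx, int dy)
{
	r->x1 += dx; r->x2 += dx;
	r->y1 += dy; r->y2 += dy;
}

int main(void)
{
	struct rect dst = { 2000, 100, 2256, 356 }; /* crtc coordinates */
	int pipe_src_x1 = 1920, pipe_src_y1 = 0;

	translate(&dst, -pipe_src_x1, -pipe_src_y1);

	printf("pipe-relative: %d,%d %dx%d\n", dst.x1, dst.y1,
	       dst.x2 - dst.x1, dst.y2 - dst.y1); /* 80,100 256x256 */
	return 0;
}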
@@ -255,7 +260,6 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
u32 cntl = 0, base = 0, pos = 0, size = 0;
- unsigned long irqflags;
if (plane_state && plane_state->uapi.visible) {
unsigned int width = drm_rect_width(&plane_state->uapi.dst);
@@ -270,8 +274,6 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
pos = intel_cursor_position(plane_state);
}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
/* On these chipsets we can only modify the base/size/stride
* whilst the cursor is disabled.
*/
@@ -290,8 +292,6 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
} else {
intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
}
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i845_cursor_disable_arm(struct intel_plane *plane,
@@ -492,7 +492,6 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
- unsigned long irqflags;
if (plane_state && plane_state->uapi.visible) {
int width = drm_rect_width(&plane_state->uapi.dst);
@@ -508,8 +507,6 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
pos = intel_cursor_position(plane_state);
}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
/*
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
@@ -555,8 +552,6 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
intel_de_write_fw(dev_priv, CURBASE(pipe), base);
}
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_cursor_disable_arm(struct intel_plane *plane,
@@ -637,7 +632,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* FIXME bigjoiner fastpath would be good
*/
if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
- crtc_state->update_pipe || crtc_state->bigjoiner)
+ crtc_state->update_pipe || crtc_state->bigjoiner_pipes)
goto slow;
/*
@@ -715,6 +710,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
+ /*
+ * Technically we should do a vblank evasion here to make
+ * sure all the cursor registers update on the same frame.
+ * For now just make sure the register writes happen as
+ * quickly as possible to minimize the race window.
+ */
+ local_irq_disable();
+
if (new_plane_state->uapi.visible) {
intel_plane_update_noarm(plane, crtc_state, new_plane_state);
intel_plane_update_arm(plane, crtc_state, new_plane_state);
@@ -722,6 +725,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
intel_plane_disable_arm(plane, crtc_state);
}
+ local_irq_enable();
+
intel_plane_unpin_fb(old_plane_state);
out_free:
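/*
 * The shape of the fix above in isolation: with the uncore spinlock
 * gone, the cursor writes are kept adjacent under local_irq_disable()
 * to shrink the window in which they could land on different frames.
 * Helper and register names are the ones used in the hunks above.
 */
local_irq_disable();

intel_de_write_fw(dev_priv, CURPOS(pipe), pos);   /* staged */
intel_de_write_fw(dev_priv, CURBASE(pipe), base); /* arms both writes */

local_irq_enable();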
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index e4260806c2a4..9e6fa59eabba 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -25,8 +25,10 @@
*
*/
+#include <linux/string_helpers.h>
+
+#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_privacy_screen_consumer.h>
-#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_audio.h"
@@ -38,12 +40,12 @@
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
-#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
@@ -2152,7 +2154,7 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
drm_dbg_kms(&i915->drm,
"Failed to %s MSA_TIMING_PAR_IGNORE in the sink\n",
- enabledisable(enable));
+ str_enable_disable(enable));
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -2818,10 +2820,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_drrs_enable(intel_dp, crtc_state);
-
- if (crtc_state->has_audio)
- intel_audio_codec_enable(encoder, crtc_state, conn_state);
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
@@ -2915,8 +2914,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, DDI_BUF_CTL(port),
dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
- if (crtc_state->has_audio)
- intel_audio_codec_enable(encoder, crtc_state, conn_state);
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
static void intel_enable_ddi(struct intel_atomic_state *state,
@@ -2957,11 +2955,8 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
intel_dp->link_trained = false;
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
- intel_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
@@ -2980,9 +2975,7 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
@@ -3011,12 +3004,9 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
intel_ddi_set_dp_msa(crtc_state, conn_state);
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_drrs_update(intel_dp, crtc_state);
intel_backlight_update(state, encoder, crtc_state, conn_state);
drm_connector_update_privacy_screen(conn_state);
@@ -4308,6 +4298,13 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
return;
}
+ if (intel_phy_is_snps(dev_priv, phy) &&
+ dev_priv->snps_phy_failed_calibration & BIT(phy)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "SNPS PHY %c failed to calibrate, proceeding anyway\n",
+ phy_name(phy));
+ }
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return;
@@ -4368,7 +4365,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_power_domains = intel_ddi_get_power_domains;
encoder->type = INTEL_OUTPUT_DDI;
- encoder->power_domain = intel_port_to_power_domain(port);
+ encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
encoder->port = port;
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
@@ -4496,8 +4493,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
drm_WARN_ON(&dev_priv->drm, port > PORT_I);
- dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
- port - PORT_A;
+ dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
if (init_dp) {
if (!intel_ddi_init_dp_connector(dig_port))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 934a9f9e7dab..85f58dd3df72 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -907,7 +907,7 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr[] = {
{ .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
{ .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
{ .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
- { .icl = { 0xC, 0x73, 0x3E, 0x00, 0x01 } }, /* 650 700 0.6 */
+ { .icl = { 0xC, 0x7C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
{ .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
@@ -921,7 +921,7 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_hbr3[
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
- { .icl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .icl = { 0xC, 0x71, 0x30, 0x00, 0x0F } }, /* 350 700 6.0 */
{ .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
{ .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
{ .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
@@ -931,19 +931,47 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_hbr3[
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0x4, 0x50, 0x38, 0x00, 0x07 } }, /* 200 200 0.0 */
+ { .icl = { 0x4, 0x58, 0x35, 0x00, 0x0A } }, /* 200 250 1.9 */
+ { .icl = { 0x4, 0x60, 0x34, 0x00, 0x0B } }, /* 200 300 3.5 */
+ { .icl = { 0x4, 0x6A, 0x32, 0x00, 0x0D } }, /* 200 350 4.9 */
+ { .icl = { 0x4, 0x5E, 0x38, 0x00, 0x07 } }, /* 250 250 0.0 */
+ { .icl = { 0x4, 0x61, 0x36, 0x00, 0x09 } }, /* 250 300 1.6 */
+ { .icl = { 0x4, 0x6B, 0x34, 0x00, 0x0B } }, /* 250 350 2.9 */
+ { .icl = { 0x4, 0x69, 0x39, 0x00, 0x06 } }, /* 300 300 0.0 */
+ { .icl = { 0x4, 0x73, 0x37, 0x00, 0x08 } }, /* 300 350 1.3 */
+ { .icl = { 0x4, 0x7A, 0x38, 0x00, 0x07 } }, /* 350 350 0.0 */
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x71, 0x30, 0x00, 0x0F } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr2_hbr3 = {
.entries = _adlp_combo_phy_trans_dp_hbr2_hbr3,
.num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_hbr3),
};
static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_hbr3 = {
- .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3),
+ .entries = _adlp_combo_phy_trans_dp_hbr2_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_edp_hbr3),
};
static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_up_to_hbr2 = {
- .entries = _icl_combo_phy_trans_edp_hbr2,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2),
+ .entries = _adlp_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_edp_hbr2),
};
static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr[] = {
@@ -1645,7 +1673,9 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
encoder->get_buf_trans = skl_get_buf_trans;
} else if (IS_BROADWELL(i915)) {
encoder->get_buf_trans = bdw_get_buf_trans;
- } else {
+ } else if (IS_HASWELL(i915)) {
encoder->get_buf_trans = hsw_get_buf_trans;
+ } else {
+ MISSING_CASE(INTEL_INFO(i915)->platform);
}
}
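/*
 * The fallthrough fix above in miniature: enumerate the platforms
 * explicitly and make the unknown case loud instead of silently
 * inheriting the last entry. Standalone toy version; the kernel
 * uses MISSING_CASE() where this prints to stderr.
 */
#include <stdio.h>

enum platform { PLAT_SKL, PLAT_BDW, PLAT_HSW, PLAT_UNKNOWN };

static const char *get_buf_trans_name(enum platform p)
{
	switch (p) {
	case PLAT_SKL: return "skl_get_buf_trans";
	case PLAT_BDW: return "bdw_get_buf_trans";
	case PLAT_HSW: return "hsw_get_buf_trans";
	default:
		fprintf(stderr, "missing case: %d\n", p);
		return NULL;
	}
}

int main(void)
{
	printf("%s\n", get_buf_trans_name(PLAT_HSW));
	return 0;
}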
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7dfeb458aa65..806d50b302ab 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -32,13 +32,14 @@
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
+#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
-#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
@@ -50,6 +51,7 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
+#include "display/intel_display_power.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
@@ -76,6 +78,7 @@
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_drv.h"
+#include "i915_utils.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
@@ -368,6 +371,11 @@ bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
crtc->pipe == bigjoiner_master_pipe(crtc_state);
}
+static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
+{
+ return hweight8(crtc_state->bigjoiner_pipes);
+}
+
struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
@@ -400,7 +408,7 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
drm_err(&dev_priv->drm,
"pipe %c scanline %s wait timed out\n",
- pipe_name(pipe), onoff(state));
+ pipe_name(pipe), str_on_off(state));
}
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
@@ -456,7 +464,7 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
I915_STATE_WARN(cur_state != state,
"transcoder %s assertion failure (expected %s, current %s)\n",
transcoder_name(cpu_transcoder),
- onoff(state), onoff(cur_state));
+ str_on_off(state), str_on_off(cur_state));
}
static void assert_plane(struct intel_plane *plane, bool state)
@@ -468,7 +476,8 @@ static void assert_plane(struct intel_plane *plane, bool state)
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
- plane->base.name, onoff(state), onoff(cur_state));
+ plane->base.name, str_on_off(state),
+ str_on_off(cur_state));
}
#define assert_plane_enabled(p) assert_plane(p, true)
@@ -517,16 +526,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
expected_mask);
}
-enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (HAS_PCH_LPT(dev_priv))
- return PIPE_A;
- else
- return crtc->pipe;
-}
-
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
@@ -783,6 +782,9 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_plane_bitmasks(crtc_state);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->data_rate_y[plane->id] = 0;
+ crtc_state->rel_data_rate[plane->id] = 0;
+ crtc_state->rel_data_rate_y[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
@@ -1117,13 +1119,13 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
- PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
+ intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
else
- intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
- PF_FILTER_MED_3x3);
- intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
- intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
+ intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3);
+ intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
+ intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
@@ -1197,7 +1199,7 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
+ return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}
@@ -1232,7 +1234,6 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
hsw_ips_post_update(state, crtc);
intel_fbc_post_update(state, crtc);
- intel_drrs_page_flip(state, crtc);
if (needs_async_flip_vtd_wa(old_crtc_state) &&
!needs_async_flip_vtd_wa(new_crtc_state))
@@ -1250,6 +1251,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
!needs_cursorclk_wa(new_crtc_state))
icl_wa_cursorclkgating(dev_priv, pipe, false);
+ intel_drrs_activate(new_crtc_state);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -1327,6 +1329,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_drrs_deactivate(old_crtc_state);
+
intel_psr_pre_plane_update(state, crtc);
if (hsw_ips_pre_update(state, crtc))
@@ -1777,7 +1781,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
* clocks enabled
*/
intel_color_load_luts(new_crtc_state);
- intel_color_commit(new_crtc_state);
+ intel_color_commit_noarm(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
@@ -1822,29 +1827,6 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}
-static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- u32 val;
-
- /* Wa_22010947358:adl-p */
- if (IS_ALDERLAKE_P(dev_priv))
- val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
- else
- val = MBUS_DBOX_A_CREDIT(2);
-
- if (DISPLAY_VER(dev_priv) >= 12) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(12);
- } else {
- val |= MBUS_DBOX_BW_CREDIT(1);
- val |= MBUS_DBOX_B_CREDIT(8);
- }
-
- intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
-}
-
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1864,7 +1846,7 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+ val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -1926,7 +1908,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- if (!new_crtc_state->bigjoiner) {
+ if (!new_crtc_state->bigjoiner_pipes) {
intel_encoders_pre_pll_enable(state, crtc);
if (new_crtc_state->shared_dpll)
@@ -1968,7 +1950,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
* clocks enabled
*/
intel_color_load_luts(new_crtc_state);
- intel_color_commit(new_crtc_state);
+ intel_color_commit_noarm(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
/* update DSPCNTR to configure gamma/csc for pipe bottom color */
if (DISPLAY_VER(dev_priv) < 9)
intel_disable_primary_plane(new_crtc_state);
@@ -1980,13 +1963,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_initial_watermarks(state, crtc);
- if (DISPLAY_VER(dev_priv) >= 11) {
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
-
- icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
- }
-
if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
intel_crtc_vblank_on(new_crtc_state);
@@ -2021,9 +1997,9 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->pch_pfit.enabled)
return;
- intel_de_write(dev_priv, PF_CTL(pipe), 0);
- intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
- intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
+ intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
+ intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
+ intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
}
static void ilk_crtc_disable(struct intel_atomic_state *state,
@@ -2182,153 +2158,82 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
return TC_PORT_1 + port - PORT_C;
}
-enum intel_display_power_domain intel_port_to_power_domain(enum port port)
-{
- switch (port) {
- case PORT_A:
- return POWER_DOMAIN_PORT_DDI_A_LANES;
- case PORT_B:
- return POWER_DOMAIN_PORT_DDI_B_LANES;
- case PORT_C:
- return POWER_DOMAIN_PORT_DDI_C_LANES;
- case PORT_D:
- return POWER_DOMAIN_PORT_DDI_D_LANES;
- case PORT_E:
- return POWER_DOMAIN_PORT_DDI_E_LANES;
- case PORT_F:
- return POWER_DOMAIN_PORT_DDI_F_LANES;
- case PORT_G:
- return POWER_DOMAIN_PORT_DDI_G_LANES;
- case PORT_H:
- return POWER_DOMAIN_PORT_DDI_H_LANES;
- case PORT_I:
- return POWER_DOMAIN_PORT_DDI_I_LANES;
- default:
- MISSING_CASE(port);
- return POWER_DOMAIN_PORT_OTHER;
- }
-}
-
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
- if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
- switch (dig_port->aux_ch) {
- case AUX_CH_C:
- return POWER_DOMAIN_AUX_C_TBT;
- case AUX_CH_D:
- return POWER_DOMAIN_AUX_D_TBT;
- case AUX_CH_E:
- return POWER_DOMAIN_AUX_E_TBT;
- case AUX_CH_F:
- return POWER_DOMAIN_AUX_F_TBT;
- case AUX_CH_G:
- return POWER_DOMAIN_AUX_G_TBT;
- case AUX_CH_H:
- return POWER_DOMAIN_AUX_H_TBT;
- case AUX_CH_I:
- return POWER_DOMAIN_AUX_I_TBT;
- default:
- MISSING_CASE(dig_port->aux_ch);
- return POWER_DOMAIN_AUX_C_TBT;
- }
- }
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
-}
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
-/*
- * Converts aux_ch to power_domain without caring about TBT ports for that use
- * intel_aux_power_domain()
- */
-enum intel_display_power_domain
-intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
-{
- switch (aux_ch) {
- case AUX_CH_A:
- return POWER_DOMAIN_AUX_A;
- case AUX_CH_B:
- return POWER_DOMAIN_AUX_B;
- case AUX_CH_C:
- return POWER_DOMAIN_AUX_C;
- case AUX_CH_D:
- return POWER_DOMAIN_AUX_D;
- case AUX_CH_E:
- return POWER_DOMAIN_AUX_E;
- case AUX_CH_F:
- return POWER_DOMAIN_AUX_F;
- case AUX_CH_G:
- return POWER_DOMAIN_AUX_G;
- case AUX_CH_H:
- return POWER_DOMAIN_AUX_H;
- case AUX_CH_I:
- return POWER_DOMAIN_AUX_I;
- default:
- MISSING_CASE(aux_ch);
- return POWER_DOMAIN_AUX_A;
- }
+ return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
-static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
+static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *mask)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
- u64 mask;
+
+ bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
if (!crtc_state->hw.active)
- return 0;
+ return;
- mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
+ set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
- mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+ set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
drm_for_each_encoder_mask(encoder, &dev_priv->drm,
crtc_state->uapi.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- mask |= BIT_ULL(intel_encoder->power_domain);
+ set_bit(intel_encoder->power_domain, mask->bits);
}
if (HAS_DDI(dev_priv) && crtc_state->has_audio)
- mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
+ set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
if (crtc_state->shared_dpll)
- mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
+ set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
if (crtc_state->dsc.compression_enable)
- mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
-
- return mask;
+ set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
-static u64
-modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
+static void
+modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *old_domains)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
- u64 domains, new_domains, old_domains;
+ struct intel_power_domain_mask domains, new_domains;
- domains = get_crtc_power_domains(crtc_state);
+ get_crtc_power_domains(crtc_state, &domains);
- new_domains = domains & ~crtc->enabled_power_domains.mask;
- old_domains = crtc->enabled_power_domains.mask & ~domains;
+ bitmap_andnot(new_domains.bits,
+ domains.bits,
+ crtc->enabled_power_domains.mask.bits,
+ POWER_DOMAIN_NUM);
+ bitmap_andnot(old_domains->bits,
+ crtc->enabled_power_domains.mask.bits,
+ domains.bits,
+ POWER_DOMAIN_NUM);
- for_each_power_domain(domain, new_domains)
+ for_each_power_domain(domain, &new_domains)
intel_display_power_get_in_set(dev_priv,
&crtc->enabled_power_domains,
domain);
-
- return old_domains;
}
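/*
 * Why the u64 masks had to go: so that more than 64 power domains can
 * be tracked, the new/old domain computation becomes bitmap_andnot()
 * over an arbitrary-width mask. Standalone toy equivalent, two words:
 */
#include <stdio.h>

#define WORDS 2 /* room for >64 domains, unlike a single u64 */

static void andnot(unsigned long long *dst, const unsigned long long *a,
		   const unsigned long long *b)
{
	int i;

	for (i = 0; i < WORDS; i++)
		dst[i] = a[i] & ~b[i];
}

int main(void)
{
	/* wanted: domains 0, 2, 64; held: domains 1, 2 */
	unsigned long long wanted[WORDS] = { 0x5, 0x1 };
	unsigned long long held[WORDS] = { 0x6, 0x0 };
	unsigned long long to_get[WORDS], to_put[WORDS];

	andnot(to_get, wanted, held); /* acquire domains 0 and 64 */
	andnot(to_put, held, wanted); /* release domain 1 */

	printf("get: %llx,%llx put: %llx,%llx\n",
	       to_get[0], to_get[1], to_put[0], to_put[1]);
	return 0;
}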
static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
- u64 domains)
+ struct intel_power_domain_mask *domains)
{
intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
&crtc->enabled_power_domains,
@@ -2388,7 +2293,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
i9xx_pfit_enable(new_crtc_state);
intel_color_load_luts(new_crtc_state);
- intel_color_commit(new_crtc_state);
+ intel_color_commit_noarm(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
@@ -2427,7 +2333,8 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
i9xx_pfit_enable(new_crtc_state);
intel_color_load_luts(new_crtc_state);
- intel_color_commit(new_crtc_state);
+ intel_color_commit_noarm(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
@@ -2683,8 +2590,8 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
return pixel_rate;
drm_rect_init(&src, 0, 0,
- crtc_state->pipe_src_w << 16,
- crtc_state->pipe_src_h << 16);
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
pixel_rate);
@@ -2724,58 +2631,81 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
ilk_pipe_pixel_rate(crtc_state);
}
+static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
+ struct drm_display_mode *mode)
+{
+ int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
+
+ if (num_pipes < 2)
+ return;
+
+ mode->crtc_clock /= num_pipes;
+ mode->crtc_hdisplay /= num_pipes;
+ mode->crtc_hblank_start /= num_pipes;
+ mode->crtc_hblank_end /= num_pipes;
+ mode->crtc_hsync_start /= num_pipes;
+ mode->crtc_hsync_end /= num_pipes;
+ mode->crtc_htotal /= num_pipes;
+}
+
+static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
+ struct drm_display_mode *mode)
+{
+ int overlap = crtc_state->splitter.pixel_overlap;
+ int n = crtc_state->splitter.link_count;
+
+ if (!crtc_state->splitter.enable)
+ return;
+
+ /*
+ * eDP MSO uses segment timings from EDID for transcoder
+ * timings, but full mode for everything else.
+ *
+ * h_full = (h_segment - pixel_overlap) * link_count
+ */
+ mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
+ mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
+ mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
+ mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
+ mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
+ mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
+ mode->crtc_clock *= n;
+}
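/*
 * Worked example combining the two helpers above, with made-up panel
 * numbers: an eDP MSO panel reporting 1928-pixel segments with 8
 * pixels of overlap over 2 links, driven as 2 bigjoined pipes.
 */
#include <stdio.h>

int main(void)
{
	int h_segment = 1928, pixel_overlap = 8, link_count = 2;
	int num_pipes = 2;
	int h_full, h_pipe;

	/* intel_splitter_adjust_timings(): segment -> full mode */
	h_full = (h_segment - pixel_overlap) * link_count; /* 3840 */

	/* intel_bigjoiner_adjust_timings(): full -> per-pipe mode */
	h_pipe = h_full / num_pipes; /* 1920 */

	printf("full: %d, per pipe: %d\n", h_full, h_pipe);
	return 0;
}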
+
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
struct drm_display_mode *mode = &crtc_state->hw.mode;
struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ /*
+ * Start with the adjusted_mode crtc timings, which
+ * have been filled with the transcoder timings.
+ */
drm_mode_copy(pipe_mode, adjusted_mode);
- if (crtc_state->bigjoiner) {
- /*
- * transcoder is programmed to the full mode,
- * but pipe timings are half of the transcoder mode
- */
- pipe_mode->crtc_hdisplay /= 2;
- pipe_mode->crtc_hblank_start /= 2;
- pipe_mode->crtc_hblank_end /= 2;
- pipe_mode->crtc_hsync_start /= 2;
- pipe_mode->crtc_hsync_end /= 2;
- pipe_mode->crtc_htotal /= 2;
- pipe_mode->crtc_clock /= 2;
- }
+ /* Expand MSO per-segment transcoder timings to full */
+ intel_splitter_adjust_timings(crtc_state, pipe_mode);
- if (crtc_state->splitter.enable) {
- int n = crtc_state->splitter.link_count;
- int overlap = crtc_state->splitter.pixel_overlap;
+ /*
+ * We want the full numbers in adjusted_mode normal timings, while
+ * adjusted_mode crtc timings are left with the raw transcoder
+ * timings.
+ */
+ intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
- /*
- * eDP MSO uses segment timings from EDID for transcoder
- * timings, but full mode for everything else.
- *
- * h_full = (h_segment - pixel_overlap) * link_count
- */
- pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
- pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
- pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
- pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
- pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
- pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
- pipe_mode->crtc_clock *= n;
-
- intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
- } else {
- intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
- }
+ /* Populate the "user" mode with full numbers */
+ drm_mode_copy(mode, pipe_mode);
+ intel_mode_from_crtc_timings(mode, mode);
+ mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
+ (intel_bigjoiner_num_pipes(crtc_state) ?: 1);
+ mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
- intel_crtc_compute_pixel_rate(crtc_state);
+ /* Derive per-pipe timings in case bigjoiner is used */
+ intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
+ intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- drm_mode_copy(mode, adjusted_mode);
- mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
- mode->vdisplay = crtc_state->pipe_src_h;
+ intel_crtc_compute_pixel_rate(crtc_state);
}
static void intel_encoder_get_config(struct intel_encoder *encoder,
@@ -2786,44 +2716,77 @@ static void intel_encoder_get_config(struct intel_encoder *encoder,
intel_crtc_readout_derived_state(crtc_state);
}
-static int intel_crtc_compute_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
- int clock_limit = dev_priv->max_dotclk_freq;
+ int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
+ int width, height;
- drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
+ if (num_pipes < 2)
+ return;
- /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
- if (pipe_config->bigjoiner) {
- pipe_mode->crtc_clock /= 2;
- pipe_mode->crtc_hdisplay /= 2;
- pipe_mode->crtc_hblank_start /= 2;
- pipe_mode->crtc_hblank_end /= 2;
- pipe_mode->crtc_hsync_start /= 2;
- pipe_mode->crtc_hsync_end /= 2;
- pipe_mode->crtc_htotal /= 2;
- pipe_config->pipe_src_w /= 2;
- }
+ width = drm_rect_width(&crtc_state->pipe_src);
+ height = drm_rect_height(&crtc_state->pipe_src);
+
+ drm_rect_init(&crtc_state->pipe_src, 0, 0,
+ width / num_pipes, height);
+}
+
+static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ intel_bigjoiner_compute_pipe_src(crtc_state);
- if (pipe_config->splitter.enable) {
- int n = pipe_config->splitter.link_count;
- int overlap = pipe_config->splitter.pixel_overlap;
+ /*
+ * Pipe horizontal size must be even in:
+ * - DVO ganged mode
+ * - LVDS dual channel mode
+ * - Double wide pipe
+ */
+ if (drm_rect_width(&crtc_state->pipe_src) & 1) {
+ if (crtc_state->double_wide) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
- pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
- pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
- pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
- pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
- pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
- pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
- pipe_mode->crtc_clock *= n;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_is_dual_link_lvds(i915)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
}
+ return 0;
+}
+
+static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int clock_limit = i915->max_dotclk_freq;
+
+ /*
+ * Start with the adjusted_mode crtc timings, which
+ * have been filled with the transcoder timings.
+ */
+ drm_mode_copy(pipe_mode, adjusted_mode);
+
+ /* Expand MSO per-segment transcoder timings to full */
+ intel_splitter_adjust_timings(crtc_state, pipe_mode);
+
+ /* Derive per-pipe timings in case bigjoiner is used */
+ intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- if (DISPLAY_VER(dev_priv) < 4) {
- clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
+ if (DISPLAY_VER(i915) < 4) {
+ clock_limit = i915->max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -2831,44 +2794,40 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
*/
if (intel_crtc_supports_double_wide(crtc) &&
pipe_mode->crtc_clock > clock_limit) {
- clock_limit = dev_priv->max_dotclk_freq;
- pipe_config->double_wide = true;
+ clock_limit = i915->max_dotclk_freq;
+ crtc_state->double_wide = true;
}
}
if (pipe_mode->crtc_clock > clock_limit) {
- drm_dbg_kms(&dev_priv->drm,
- "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ crtc->base.base.id, crtc->base.name,
pipe_mode->crtc_clock, clock_limit,
- yesno(pipe_config->double_wide));
+ str_yes_no(crtc_state->double_wide));
return -EINVAL;
}
- /*
- * Pipe horizontal size must be even in:
- * - DVO ganged mode
- * - LVDS dual channel mode
- * - Double wide pipe
- */
- if (pipe_config->pipe_src_w & 1) {
- if (pipe_config->double_wide) {
- drm_dbg_kms(&dev_priv->drm,
- "Odd pipe source width not supported with double wide pipe\n");
- return -EINVAL;
- }
+ return 0;
+}
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "Odd pipe source width not supported with dual link LVDS\n");
- return -EINVAL;
- }
- }
+static int intel_crtc_compute_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int ret;
- intel_crtc_compute_pixel_rate(pipe_config);
+ ret = intel_crtc_compute_pipe_src(crtc_state);
+ if (ret)
+ return ret;
- if (pipe_config->has_pch_encoder)
- return ilk_fdi_compute_config(crtc, pipe_config);
+ ret = intel_crtc_compute_pipe_mode(crtc_state);
+ if (ret)
+ return ret;
+
+ intel_crtc_compute_pixel_rate(crtc_state);
+
+ if (crtc_state->has_pch_encoder)
+ return ilk_fdi_compute_config(crtc, crtc_state);
return 0;
}
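
The splitter math removed above now lives in intel_splitter_adjust_timings(); as a rough standalone sketch (hypothetical two-link MSO panel numbers, not from any real VBT), each per-segment horizontal timing value expands to the full panel as (segment - overlap) * link_count:

#include <stdio.h>

/* MSO segment timing expansion in the style of the removed code: each of
 * n links drives a segment plus `overlap` shared pixels, so every
 * horizontal timing value expands as (segment - overlap) * n. The panel
 * numbers below are hypothetical. */
static int mso_expand(int segment_pixels, int link_count, int pixel_overlap)
{
	return (segment_pixels - pixel_overlap) * link_count;
}

int main(void)
{
	/* imaginary 2-link eDP MSO panel with 24 overlap pixels */
	printf("full hdisplay = %d\n", mso_expand(1952, 2, 24)); /* 3856 */
	return 0;
}
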
@@ -2941,8 +2900,8 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
drm_dbg_kms(&dev_priv->drm,
"SSC %s by BIOS, overriding VBT which says %s\n",
- enableddisabled(bios_lvds_use_ssc),
- enableddisabled(dev_priv->vbt.lvds_use_ssc));
+ str_enabled_disabled(bios_lvds_use_ssc),
+ str_enabled_disabled(dev_priv->vbt.lvds_use_ssc));
dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
@@ -3072,14 +3031,15 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int width = drm_rect_width(&crtc_state->pipe_src);
+ int height = drm_rect_height(&crtc_state->pipe_src);
enum pipe pipe = crtc->pipe;
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
intel_de_write(dev_priv, PIPESRC(pipe),
- PIPESRC_WIDTH(crtc_state->pipe_src_w - 1) |
- PIPESRC_HEIGHT(crtc_state->pipe_src_h - 1));
+ PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
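
A standalone sketch of the minus-one packing PIPESRC uses; the field positions below are illustrative, not copied from i915_reg.h:

#include <stdint.h>
#include <stdio.h>

/* PIPESRC-style packing: hardware stores size minus one, width in the
 * high half, height in the low half. Field layout here is illustrative. */
#define SRC_WIDTH(w)	((uint32_t)((w) - 1) << 16)
#define SRC_HEIGHT(h)	((uint32_t)((h) - 1))

int main(void)
{
	uint32_t v = SRC_WIDTH(3840) | SRC_HEIGHT(2160);

	printf("pipesrc = 0x%08x -> %ux%u\n", v,
	       (unsigned int)((v >> 16) + 1),
	       (unsigned int)((v & 0xffff) + 1));
	return 0;
}
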
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -3142,6 +3102,23 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
}
}
+static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
+ enum pipe master_pipe, pipe = crtc->pipe;
+ int width;
+
+ if (num_pipes < 2)
+ return;
+
+ master_pipe = bigjoiner_master_pipe(crtc_state);
+ width = drm_rect_width(&crtc_state->pipe_src);
+
+ drm_rect_translate_to(&crtc_state->pipe_src,
+ (pipe - master_pipe) * width, 0);
+}
+
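
Readout works the other way around, as a standalone sketch: intel_bigjoiner_adjust_pipe_src() records the per-pipe width and moves each pipe's slice so it starts at (pipe - master_pipe) * width (local rect type, mimicking what drm_rect_translate_to() does, not the kernel API):

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Mirrors intel_bigjoiner_adjust_pipe_src(): move the slice so that it
 * starts at (pipe - master_pipe) * width. Standalone sketch only. */
static void adjust_pipe_src(struct rect *pipe_src, int pipe, int master_pipe)
{
	int width = pipe_src->x2 - pipe_src->x1;
	int dx = (pipe - master_pipe) * width - pipe_src->x1;

	pipe_src->x1 += dx;
	pipe_src->x2 += dx;
}

int main(void)
{
	struct rect src = { 0, 0, 3840, 4320 };	/* slice as read from PIPESRC */

	adjust_pipe_src(&src, 1, 0);		/* slave pipe B, master pipe A */
	printf("slave slice: [%d..%d)\n", src.x1, src.x2);
	return 0;
}
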
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -3150,8 +3127,12 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
u32 tmp;
tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
- pipe_config->pipe_src_w = REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1;
- pipe_config->pipe_src_h = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1;
+
+ drm_rect_init(&pipe_config->pipe_src, 0, 0,
+ REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
+ REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
+
+ intel_bigjoiner_adjust_pipe_src(pipe_config);
}
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
@@ -3207,7 +3188,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+ pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
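
The frame start delay field stores delay - 1 (valid delays are 1-4), which is why the readout hunks below add 1 back. A sketch of that encoding with an illustrative field position, not the real mask from i915_reg.h:

#include <stdio.h>

/* The PIPECONF frame start delay field stores delay - 1 (valid 1..4), so
 * hardware readout adds 1 back. The shift below is illustrative only. */
#define FSD_SHIFT	27
#define FSD_MASK	(0x3u << FSD_SHIFT)

static unsigned int fsd_encode(unsigned int delay)
{
	return (delay - 1) << FSD_SHIFT;
}

static unsigned int fsd_decode(unsigned int reg)
{
	return ((reg & FSD_MASK) >> FSD_SHIFT) + 1;
}

int main(void)
{
	unsigned int reg = fsd_encode(1);	/* the driver default of 1 */

	printf("field 0x%08x -> delay %u\n", reg, fsd_decode(reg));
	return 0;
}
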
@@ -3397,6 +3378,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
+ pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+
if (IS_CHERRYVIEW(dev_priv))
pipe_config->cgm_mode = intel_de_read(dev_priv,
CGM_PIPE_MODE(crtc->pipe));
@@ -3522,7 +3505,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+ val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
intel_de_write(dev_priv, PIPECONF(pipe), val);
intel_de_posting_read(dev_priv, PIPECONF(pipe));
@@ -3554,12 +3538,8 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
-
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
- int i;
switch (crtc_state->pipe_bpp) {
case 18:
@@ -3598,23 +3578,6 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 12)
val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
- if (IS_ALDERLAKE_P(dev_priv)) {
- bool scaler_in_use = false;
-
- for (i = 0; i < crtc->num_scalers; i++) {
- if (!scaler_state->scalers[i].in_use)
- continue;
-
- scaler_in_use = true;
- break;
- }
-
- intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
- PIPE_MISC2_BUBBLE_COUNTER_MASK,
- scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
- PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
- }
-
intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
@@ -3830,6 +3793,10 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
+ pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+
+ pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);
+
pipe_config->csc_mode = intel_de_read(dev_priv,
PIPE_CSC_MODE(crtc->pipe));
@@ -4164,7 +4131,6 @@ static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
return;
- crtc_state->bigjoiner = true;
crtc_state->bigjoiner_pipes =
BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
@@ -4265,6 +4231,15 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder));
+
+ pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
+ } else {
+ /* no idea if this is correct */
+ pipe_config->framestart_delay = 1;
+ }
+
out:
intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
@@ -4746,6 +4721,8 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
crtc_state->enabled_planes &= ~BIT(plane->id);
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->rel_data_rate[plane->id] = 0;
}
plane_state->planar_slave = false;
@@ -4790,6 +4767,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
crtc_state->enabled_planes |= BIT(linked->id);
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
+ crtc_state->data_rate[linked->id] =
+ crtc_state->data_rate_y[plane->id];
+ crtc_state->rel_data_rate[linked->id] =
+ crtc_state->rel_data_rate_y[plane->id];
drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
linked->base.name, plane->base.name);
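
The two icl_check_nv12_planes() hunks above keep the data-rate bookkeeping consistent when a Y plane is unlinked or linked for planar formats. A toy sketch of the hand-off, with local arrays and made-up plane ids rather than the kernel's crtc_state fields:

#include <stdio.h>

/* Toy sketch of the Y/UV plane linking bookkeeping: the UV "master" plane
 * tracks a separate Y data rate, and linking hands that rate to the plane
 * that actually scans out the Y surface, so per-plane sums stay correct. */
#define MAX_PLANES 8

int main(void)
{
	unsigned int data_rate[MAX_PLANES] = { 0 };
	unsigned int data_rate_y[MAX_PLANES] = { 0 };
	int uv_plane = 0, y_plane = 5;		/* hypothetical ids */

	data_rate[uv_plane] = 1000;		/* UV surface rate */
	data_rate_y[uv_plane] = 2000;		/* Y surface rate, on the master */

	/* link: the Y plane inherits the master's Y rate */
	data_rate[y_plane] = data_rate_y[uv_plane];

	/* unlink would zero data_rate[y_plane] again, as in the first hunk */
	printf("total rate = %u\n", data_rate[uv_plane] + data_rate[y_plane]);
	return 0;
}
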
@@ -4923,9 +4904,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
mode_changed && !crtc_state->hw.active)
crtc_state->update_wm_post = true;
- if (mode_changed && crtc_state->hw.enable &&
- !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
- ret = intel_dpll_crtc_compute_clock(crtc_state);
+ if (mode_changed) {
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
if (ret)
return ret;
}
@@ -5222,7 +5206,7 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
plane->base.base.id, plane->base.name,
- yesno(plane_state->uapi.visible));
+ str_yes_no(plane_state->uapi.visible));
return;
}
@@ -5230,7 +5214,7 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
"[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height, &fb->format->format,
- fb->modifier, yesno(plane_state->uapi.visible));
+ fb->modifier, str_yes_no(plane_state->uapi.visible));
drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
plane_state->hw.rotation, plane_state->scaler_id);
if (plane_state->uapi.visible)
@@ -5253,7 +5237,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
crtc->base.base.id, crtc->base.name,
- yesno(pipe_config->hw.enable), context);
+ str_yes_no(pipe_config->hw.enable), context);
if (!pipe_config->hw.enable)
goto dump_planes;
@@ -5261,7 +5245,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
drm_dbg_kms(&dev_priv->drm,
"active: %s, output_types: %s (0x%x), output format: %s\n",
- yesno(pipe_config->hw.active),
+ str_yes_no(pipe_config->hw.active),
buf, pipe_config->output_types,
output_formats(pipe_config->output_format));
@@ -5284,7 +5268,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
pipe_config->bigjoiner_pipes);
drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
- enableddisabled(pipe_config->splitter.enable),
+ str_enabled_disabled(pipe_config->splitter.enable),
pipe_config->splitter.link_count,
pipe_config->splitter.pixel_overlap);
@@ -5302,6 +5286,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
&pipe_config->dp_m2_n2);
}
+ drm_dbg_kms(&dev_priv->drm, "framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+
drm_dbg_kms(&dev_priv->drm,
"audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
pipe_config->has_audio, pipe_config->has_infoframe,
@@ -5331,25 +5318,24 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
- yesno(pipe_config->vrr.enable),
+ str_yes_no(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.flipline,
intel_vrr_vmin_vblank_start(pipe_config),
intel_vrr_vmax_vblank_start(pipe_config));
- drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->hw.mode);
- drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
+ drm_dbg_kms(&dev_priv->drm, "requested mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.mode));
+ drm_dbg_kms(&dev_priv->drm, "adjusted mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
- drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
+ drm_dbg_kms(&dev_priv->drm, "pipe mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
drm_dbg_kms(&dev_priv->drm,
- "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
- pipe_config->port_clock,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+ "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
pipe_config->pixel_rate);
drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
@@ -5372,11 +5358,12 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
drm_dbg_kms(&dev_priv->drm,
"pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
- enableddisabled(pipe_config->pch_pfit.enabled),
- yesno(pipe_config->pch_pfit.force_thru));
+ str_enabled_disabled(pipe_config->pch_pfit.enabled),
+ str_yes_no(pipe_config->pch_pfit.force_thru));
- drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide);
+ drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i, drrs: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide,
+ pipe_config->has_drrs);
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
@@ -5504,8 +5491,10 @@ intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
crtc_state->hw.enable = crtc_state->uapi.enable;
crtc_state->hw.active = crtc_state->uapi.active;
- crtc_state->hw.mode = crtc_state->uapi.mode;
- crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
+ drm_mode_copy(&crtc_state->hw.mode,
+ &crtc_state->uapi.mode);
+ drm_mode_copy(&crtc_state->hw.adjusted_mode,
+ &crtc_state->uapi.adjusted_mode);
crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
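
These drm_mode_copy() conversions (here and in the bigjoiner copy further down) matter because struct drm_display_mode embeds a list head that a plain struct assignment would overwrite. A standalone sketch of the idea with local types; the real helper lives in drivers/gpu/drm/drm_modes.c:

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

struct display_mode {
	struct list_head head;	/* must keep the destination's own linkage */
	int hdisplay, vdisplay, clock;
};

static void mode_copy(struct display_mode *dst, const struct display_mode *src)
{
	struct list_head head = dst->head;

	*dst = *src;		/* copy all the timing fields... */
	dst->head = head;	/* ...but restore the original list head */
}

int main(void)
{
	struct display_mode a = { { &a.head, &a.head }, 3840, 2160, 533250 };
	struct display_mode b = { { &b.head, &b.head }, 0, 0, 0 };

	mode_copy(&b, &a);
	printf("copied %dx%d, own head intact: %d\n",
	       b.hdisplay, b.vdisplay, b.head.next == &b.head);
	return 0;
}
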
@@ -5563,6 +5552,9 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, master_crtc);
struct intel_crtc_state *saved_state;
+ WARN_ON(master_crtc_state->bigjoiner_pipes !=
+ slave_crtc_state->bigjoiner_pipes);
+
saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
if (!saved_state)
return -ENOMEM;
@@ -5582,19 +5574,22 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
slave_crtc_state->hw.active = master_crtc_state->hw.active;
- slave_crtc_state->hw.mode = master_crtc_state->hw.mode;
- slave_crtc_state->hw.pipe_mode = master_crtc_state->hw.pipe_mode;
- slave_crtc_state->hw.adjusted_mode = master_crtc_state->hw.adjusted_mode;
+ drm_mode_copy(&slave_crtc_state->hw.mode,
+ &master_crtc_state->hw.mode);
+ drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
+ &master_crtc_state->hw.pipe_mode);
+ drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
+ &master_crtc_state->hw.adjusted_mode);
slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
- /* Some fixups */
slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;
- slave_crtc_state->cpu_transcoder = master_crtc_state->cpu_transcoder;
- slave_crtc_state->has_audio = master_crtc_state->has_audio;
+
+ WARN_ON(master_crtc_state->bigjoiner_pipes !=
+ slave_crtc_state->bigjoiner_pipes);
return 0;
}
@@ -5647,12 +5642,15 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
+ int pipe_src_w, pipe_src_h;
int base_bpp, ret, i;
bool retry = true;
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
+ pipe_config->framestart_delay = 1;
+
/*
* Sanitize sync polarity flags based on requested ones. If neither
* positive nor negative polarity is requested, treat this as meaning
@@ -5682,8 +5680,9 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
* can be changed by the connectors in the below retry loop.
*/
drm_mode_get_hv_timing(&pipe_config->hw.mode,
- &pipe_config->pipe_src_w,
- &pipe_config->pipe_src_h);
+ &pipe_src_w, &pipe_src_h);
+ drm_rect_init(&pipe_config->pipe_src, 0, 0,
+ pipe_src_w, pipe_src_h);
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
struct intel_encoder *encoder =
@@ -5786,6 +5785,8 @@ intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
struct drm_connector *connector;
int i;
+ intel_bigjoiner_adjust_pipe_src(crtc_state);
+
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -6022,8 +6023,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
- yesno(current_config->name), \
- yesno(pipe_config->name)); \
+ str_yes_no(current_config->name), \
+ str_yes_no(pipe_config->name)); \
ret = false; \
} \
} while (0)
@@ -6039,8 +6040,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} else { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
- yesno(current_config->name), \
- yesno(pipe_config->name)); \
+ str_yes_no(current_config->name), \
+ str_yes_no(pipe_config->name)); \
ret = false; \
} \
} while (0)
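
A condensed standalone sketch of the macro pattern above: compare one field of two state snapshots and print both sides as yes/no strings on mismatch (local types; the kernel macro also stringifies the field name and sets ret = false):

#include <stdbool.h>
#include <stdio.h>

struct state { bool has_audio; };

static const char *yes_no(bool v) { return v ? "yes" : "no"; }

/* Sketch of the PIPE_CONF_CHECK_BOOL pattern. */
#define CHECK_BOOL(cur, new, field) do {				\
	if ((cur)->field != (new)->field)				\
		printf("mismatch in " #field " (expected %s, found %s)\n", \
		       yes_no((cur)->field), yes_no((new)->field));	\
} while (0)

int main(void)
{
	struct state hw = { .has_audio = true };
	struct state sw = { .has_audio = false };

	CHECK_BOOL(&hw, &sw, has_audio);
	return 0;
}
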
@@ -6190,6 +6191,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(output_types);
+ PIPE_CONF_CHECK_I(framestart_delay);
+ PIPE_CONF_CHECK_I(msa_timing_delay);
+
PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
@@ -6260,8 +6264,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
if (!fastset) {
- PIPE_CONF_CHECK_I(pipe_src_w);
- PIPE_CONF_CHECK_I(pipe_src_h);
+ PIPE_CONF_CHECK_I(pipe_src.x1);
+ PIPE_CONF_CHECK_I(pipe_src.y1);
+ PIPE_CONF_CHECK_I(pipe_src.x2);
+ PIPE_CONF_CHECK_I(pipe_src.y2);
PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
@@ -6363,7 +6369,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
- PIPE_CONF_CHECK_BOOL(bigjoiner);
PIPE_CONF_CHECK_X(bigjoiner_pipes);
PIPE_CONF_CHECK_I(dsc.compression_enable);
@@ -6420,8 +6425,8 @@ static void verify_wm_state(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
struct skl_pipe_wm wm;
} *hw;
const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
@@ -6438,7 +6443,7 @@ static void verify_wm_state(struct intel_crtc *crtc,
skl_pipe_wm_get_hw_state(crtc, &hw->wm);
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
@@ -6520,8 +6525,8 @@ static void verify_wm_state(struct intel_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw->ddb_y[plane->id];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
+ hw_ddb_entry = &hw->ddb[plane->id];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[plane->id];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
drm_err(&dev_priv->drm,
@@ -6897,8 +6902,9 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_display_mode adjusted_mode =
- crtc_state->hw.adjusted_mode;
+ struct drm_display_mode adjusted_mode;
+
+ drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
if (crtc_state->vrr.enable) {
adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
@@ -6956,14 +6962,10 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
- if (!dev_priv->dpll_funcs)
- return;
-
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
@@ -7294,32 +7296,26 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
struct intel_crtc_state *master_crtc_state =
intel_atomic_get_new_crtc_state(state, master_crtc);
struct intel_crtc *slave_crtc;
- u8 slave_pipes;
-
- /*
- * TODO: encoder.compute_config() may be the best
- * place to populate the bitmask for the master crtc.
- * For now encoder.compute_config() just flags things
- * as needing bigjoiner and we populate the bitmask
- * here.
- */
- WARN_ON(master_crtc_state->bigjoiner_pipes);
- if (!master_crtc_state->bigjoiner)
+ if (!master_crtc_state->bigjoiner_pipes)
return 0;
- slave_pipes = BIT(master_crtc->pipe + 1);
+ /* sanity check */
+ if (drm_WARN_ON(&i915->drm,
+ master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
+ return -EINVAL;
- if (slave_pipes & ~bigjoiner_pipes(i915)) {
+ if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] Cannot act as big joiner master "
- "(need 0x%x as slave pipes, only 0x%x possible)\n",
+ "(need 0x%x as pipes, only 0x%x possible)\n",
master_crtc->base.base.id, master_crtc->base.name,
- slave_pipes, bigjoiner_pipes(i915));
+ master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
return -EINVAL;
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, slave_pipes) {
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
struct intel_crtc_state *slave_crtc_state;
int ret;
@@ -7353,10 +7349,8 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
slave_crtc->base.base.id, slave_crtc->base.name,
master_crtc->base.base.id, master_crtc->base.name);
- master_crtc_state->bigjoiner_pipes =
- BIT(master_crtc->pipe) | BIT(slave_crtc->pipe);
slave_crtc_state->bigjoiner_pipes =
- BIT(master_crtc->pipe) | BIT(slave_crtc->pipe);
+ master_crtc_state->bigjoiner_pipes;
ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
if (ret)
@@ -7379,13 +7373,11 @@ static void kill_bigjoiner_slave(struct intel_atomic_state *state,
struct intel_crtc_state *slave_crtc_state =
intel_atomic_get_new_crtc_state(state, slave_crtc);
- slave_crtc_state->bigjoiner = false;
slave_crtc_state->bigjoiner_pipes = 0;
intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
}
- master_crtc_state->bigjoiner = false;
master_crtc_state->bigjoiner_pipes = 0;
}
@@ -7479,18 +7471,24 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!new_crtc_state->uapi.async_flip)
return 0;
- if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
+ if (!new_crtc_state->hw.active) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] not active\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
- if (!new_crtc_state->hw.active) {
- drm_dbg_kms(&i915->drm, "CRTC inactive\n");
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] modeset required\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
+
if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
drm_dbg_kms(&i915->drm,
- "Active planes cannot be changed during async flip\n");
+ "[CRTC:%d:%s] Active planes cannot be in async flip\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
@@ -7528,78 +7526,98 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
+ case I915_FORMAT_MOD_4_TILED:
break;
default:
drm_dbg_kms(&i915->drm,
- "Linear memory/CCS does not support async flips\n");
+ "[PLANE:%d:%s] Modifier does not support async flips\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (new_plane_state->hw.fb->format->num_planes > 1) {
drm_dbg_kms(&i915->drm,
- "Planar formats not supported with async flips\n");
+ "[PLANE:%d:%s] Planar formats do not support async flips\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->view.color_plane[0].mapping_stride !=
new_plane_state->view.color_plane[0].mapping_stride) {
- drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.fb->modifier !=
new_plane_state->hw.fb->modifier) {
drm_dbg_kms(&i915->drm,
- "Framebuffer modifiers cannot be changed in async flip\n");
+ "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.fb->format !=
new_plane_state->hw.fb->format) {
drm_dbg_kms(&i915->drm,
- "Framebuffer format cannot be changed in async flip\n");
+ "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.rotation !=
new_plane_state->hw.rotation) {
- drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
!drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
drm_dbg_kms(&i915->drm,
- "Plane size/co-ordinates cannot be changed in async flip\n");
+ "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
- drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
+ drm_dbg_kms(&i915->drm,
+ "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.pixel_blend_mode !=
new_plane_state->hw.pixel_blend_mode) {
drm_dbg_kms(&i915->drm,
- "Pixel blend mode cannot be changed in async flip\n");
+ "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
drm_dbg_kms(&i915->drm,
- "Color encoding cannot be changed in async flip\n");
+ "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
- drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
}
/* plane decryption is allowed to change only in synchronous flips */
- if (old_plane_state->decrypt != new_plane_state->decrypt)
+ if (old_plane_state->decrypt != new_plane_state->decrypt) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
return -EINVAL;
+ }
}
return 0;
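
Taken together, the checks above say an async flip may change the framebuffer address and nothing else. A condensed standalone sketch of that invariant, using a local struct rather than the kernel's intel_plane_state:

#include <stdbool.h>
#include <stdio.h>

/* Everything except the framebuffer address must stay put across an
 * async flip; any other change has to go through a synchronous flip. */
struct plane_cfg {
	unsigned long long modifier;
	unsigned int format;
	unsigned int stride;
	unsigned int rotation;
	bool decrypt;
};

static bool async_flip_allowed(const struct plane_cfg *old,
			       const struct plane_cfg *new)
{
	return old->modifier == new->modifier &&
	       old->format == new->format &&
	       old->stride == new->stride &&
	       old->rotation == new->rotation &&
	       old->decrypt == new->decrypt;
}

int main(void)
{
	struct plane_cfg a = { 0x1, 0x34325258, 16384, 0, false };
	struct plane_cfg b = a;

	b.rotation = 90;	/* any such change forces a synchronous flip */
	printf("async flip ok: %s\n", async_flip_allowed(&a, &b) ? "yes" : "no");
	return 0;
}
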
@@ -7772,7 +7790,7 @@ static int intel_atomic_check(struct drm_device *dev,
}
}
- if (new_crtc_state->bigjoiner) {
+ if (new_crtc_state->bigjoiner_pipes) {
if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
@@ -7970,7 +7988,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
if (!modeset) {
if (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe)
- intel_color_commit(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
@@ -8050,6 +8068,11 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
+ if (!modeset &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_commit_noarm(new_crtc_state);
+
intel_crtc_planes_update_noarm(state, crtc);
/* Perform vblank evasion around commit operation */
@@ -8384,7 +8407,9 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s
/*
* The layout of the fast clear color value expected by HW
- * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
+ * (the DRM ABI requires this value to be located in the fb at
+ * offset 0 of the CC plane: plane #2 on previous generations, or
+ * plane #1 for flat CCS):
* - 4 x 4 bytes per-channel value
* (in surface type specific float/int format provided by the fb user)
* - 8 bytes native color value used by the display
@@ -8410,7 +8435,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
- u64 put_domains[I915_MAX_PIPES] = {};
+ struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
intel_wakeref_t wakeref = 0;
int i;
@@ -8427,9 +8452,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
-
- put_domains[crtc->pipe] =
- modeset_get_crtc_power_domains(new_crtc_state);
+ modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
}
}
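
The u64 to struct intel_power_domain_mask conversion here reflects the power domain count outgrowing 64 bits, so the mask becomes a small bitmap. A minimal sketch with a fixed word count and an illustrative domain count:

#include <stdio.h>

/* Once the domain count passes 64, a fixed array of words replaces the
 * single integer. Word and domain counts below are illustrative. */
#define DOMAIN_NUM	70
#define BITS_PER_WORD	64
#define WORDS		((DOMAIN_NUM + BITS_PER_WORD - 1) / BITS_PER_WORD)

struct domain_mask { unsigned long long bits[WORDS]; };

static void mask_set(struct domain_mask *m, int domain)
{
	m->bits[domain / BITS_PER_WORD] |= 1ULL << (domain % BITS_PER_WORD);
}

static int mask_empty(const struct domain_mask *m)
{
	for (int i = 0; i < WORDS; i++)
		if (m->bits[i])
			return 0;
	return 1;
}

int main(void)
{
	struct domain_mask m = { { 0 } };

	mask_set(&m, 66);	/* a domain a u64 could not index */
	printf("empty: %d\n", mask_empty(&m));
	return 0;
}
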
@@ -8467,6 +8490,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
+ intel_mbus_dbox_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->do_async_flip)
@@ -8528,7 +8552,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
- modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
+ modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
@@ -9573,8 +9597,6 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
- i915->framestart_delay = 1; /* 1-4 */
-
i915->window2_delay = 0; /* No DSB so no window2 delay */
intel_mode_config_init(i915);
@@ -9643,7 +9665,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
}
intel_plane_possible_crtcs_init(i915);
- intel_shared_dpll_init(dev);
+ intel_shared_dpll_init(i915);
intel_fdi_pll_freq_update(i915);
intel_update_czclk(i915);
@@ -9750,9 +9772,6 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- intel_de_write(dev_priv, FP0(pipe), fp);
- intel_de_write(dev_priv, FP1(pipe), fp);
-
intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
@@ -9761,6 +9780,9 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, FP0(pipe), fp);
+ intel_de_write(dev_priv, FP1(pipe), fp);
+
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
@@ -9871,64 +9893,6 @@ static struct intel_connector *intel_encoder_find_connector(struct intel_encoder
return NULL;
}
-static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
- enum pipe pch_transcoder)
-{
- return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
- (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
-}
-
-static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
- i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
- u32 val;
-
- if (transcoder_is_dsi(cpu_transcoder))
- return;
-
- val = intel_de_read(dev_priv, reg);
- val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
- } else {
- i915_reg_t reg = PIPECONF(cpu_transcoder);
- u32 val;
-
- val = intel_de_read(dev_priv, reg);
- val &= ~PIPECONF_FRAME_START_DELAY_MASK;
- val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
- }
-
- if (!crtc_state->has_pch_encoder)
- return;
-
- if (HAS_PCH_IBX(dev_priv)) {
- i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
- u32 val;
-
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
- } else {
- enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
- i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
- u32 val;
-
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
- }
-}
-
static void intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -9939,9 +9903,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
if (crtc_state->hw.active) {
struct intel_plane *plane;
- /* Clear any frame start delays used for debugging left by the BIOS */
- intel_sanitize_frame_start_delay(crtc_state);
-
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
const struct intel_plane_state *plane_state =
@@ -9953,7 +9914,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
}
/* Disable any background color/etc. set by the BIOS */
- intel_color_commit(crtc_state);
+ intel_color_commit_noarm(crtc_state);
+ intel_color_commit_arm(crtc_state);
}
/* Adjust the state of the output pipe according to whether we
@@ -9986,7 +9948,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
- if (has_pch_trancoder(dev_priv, crtc->pipe))
+ if (intel_has_pch_trancoder(dev_priv, crtc->pipe))
crtc->pch_fifo_underrun_disabled = true;
}
}
@@ -10105,7 +10067,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
plane->base.base.id, plane->base.name,
- enableddisabled(visible), pipe_name(pipe));
+ str_enabled_disabled(visible), pipe_name(pipe));
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -10151,7 +10113,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc_state->hw.active));
+ str_enabled_disabled(crtc_state->hw.active));
}
cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
@@ -10171,7 +10133,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_encoder_get_config(encoder, crtc_state);
/* read out to slave crtc as well for bigjoiner */
- if (crtc_state->bigjoiner) {
+ if (crtc_state->bigjoiner_pipes) {
struct intel_crtc *slave_crtc;
/* encoder should already be linked to the bigjoiner master */
@@ -10195,7 +10157,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
- enableddisabled(encoder->base.crtc),
+ str_enabled_disabled(encoder->base.crtc),
pipe_name(pipe));
}
@@ -10233,7 +10195,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_dbg_kms(&dev_priv->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
- enableddisabled(connector->base.encoder));
+ str_enabled_disabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
@@ -10359,66 +10321,6 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
}
}
-static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
- enum port port, i915_reg_t hdmi_reg)
-{
- u32 val = intel_de_read(dev_priv, hdmi_reg);
-
- if (val & SDVO_ENABLE ||
- (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "Sanitizing transcoder select for HDMI %c\n",
- port_name(port));
-
- val &= ~SDVO_PIPE_SEL_MASK;
- val |= SDVO_PIPE_SEL(PIPE_A);
-
- intel_de_write(dev_priv, hdmi_reg, val);
-}
-
-static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
- enum port port, i915_reg_t dp_reg)
-{
- u32 val = intel_de_read(dev_priv, dp_reg);
-
- if (val & DP_PORT_EN ||
- (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "Sanitizing transcoder select for DP %c\n",
- port_name(port));
-
- val &= ~DP_PIPE_SEL_MASK;
- val |= DP_PIPE_SEL(PIPE_A);
-
- intel_de_write(dev_priv, dp_reg, val);
-}
-
-static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
-{
- /*
- * The BIOS may select transcoder B on some of the PCH
- * ports even it doesn't enable the port. This would trip
- * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
- * Sanitize the transcoder select bits to prevent that. We
- * assume that the BIOS never actually enabled the port,
- * because if it did we'd actually have to toggle the port
- * on and back off to make the transcoder A select stick
- * (see. intel_dp_link_down(), intel_disable_hdmi(),
- * intel_disable_sdvo()).
- */
- ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
-
- /* PCH SDVOB multiplex with HDMIB */
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
-}
/* Scan out the current hw modeset state,
* and sanitize it to the current state
@@ -10440,8 +10342,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(dev_priv);
- if (HAS_PCH_IBX(dev_priv))
- ibx_sanitize_pch_ports(dev_priv);
+ intel_pch_sanitize(dev_priv);
/*
* intel_sanitize_plane_mapping() may need to do vblank
@@ -10457,6 +10358,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_crtc_vblank_on(crtc_state);
}
+ intel_fbc_sanitize(dev_priv);
+
intel_sanitize_plane_mapping(dev_priv);
for_each_intel_encoder(dev, encoder)
@@ -10490,11 +10393,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- u64 put_domains;
+ struct intel_power_domain_mask put_domains;
- put_domains = modeset_get_crtc_power_domains(crtc_state);
- if (drm_WARN_ON(dev, put_domains))
- modeset_put_crtc_power_domains(crtc, put_domains);
+ modeset_get_crtc_power_domains(crtc_state, &put_domains);
+ if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ modeset_put_crtc_power_domains(crtc, &put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
@@ -10595,8 +10498,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
intel_unregister_dsm_handler();
- intel_fbc_global_disable(i915);
-
/* flush any delayed tasks or pending work */
flush_scheduled_work();
@@ -10697,3 +10598,8 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
acpi_video_unregister();
intel_opregion_unregister(i915);
}
+
+bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
+}
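
A hedged standalone sketch of the new helper's shape, with a stub predicate standing in for i915_vtd_active():

#include <stdbool.h>
#include <stdio.h>

/* The scanout VT-d workaround applies on display version 6+ only when
 * the IOMMU is actually translating for the GPU. */
static bool vtd_active_stub(void)
{
	return true;	/* assume intel_iommu is on for the example */
}

static bool scanout_needs_vtd_wa(int display_ver)
{
	return display_ver >= 6 && vtd_active_stub();
}

int main(void)
{
	printf("gen5: %d, gen12: %d\n",
	       scanout_needs_vtd_wa(5), scanout_needs_vtd_wa(12));
	return 0;
}
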
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 11d6134c53c8..187910d94ec6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -565,7 +565,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
@@ -636,11 +635,9 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
-enum intel_display_power_domain
-intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
@@ -695,4 +692,6 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
#define I915_STATE_WARN_ON(x) \
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
+bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ffe6822d7414..452d773fd4e3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -3,6 +3,8 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
@@ -10,6 +12,7 @@
#include "intel_de.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
+#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -19,6 +22,7 @@
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_panel.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -52,7 +56,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(dev_priv->params.enable_ips));
+ str_yes_no(dev_priv->params.enable_ips));
if (DISPLAY_VER(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
@@ -92,7 +96,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
- seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
+ seq_printf(m, "self-refresh: %s\n", str_enabled_disabled(sr_enabled));
return 0;
}
@@ -260,7 +264,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
bool enabled;
u32 val;
- seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
+ seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
seq_puts(m, "\n");
@@ -279,7 +283,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
if (!psr->enabled) {
seq_printf(m, "PSR sink not reliable: %s\n",
- yesno(psr->sink_not_reliable));
+ str_yes_no(psr->sink_not_reliable));
goto unlock;
}
@@ -294,7 +298,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
- enableddisabled(enabled), val);
+ str_enabled_disabled(enabled), val);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
psr->busy_frontbuffer_bits);
@@ -341,7 +345,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
}
seq_printf(m, "PSR2 selective fetch: %s\n",
- enableddisabled(psr->psr2_sel_fetch_enabled));
+ str_enabled_disabled(psr->psr2_sel_fetch_enabled));
}
unlock:
@@ -432,75 +436,6 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_dmc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct intel_dmc *dmc;
- i915_reg_t dc5_reg, dc6_reg = {};
-
- if (!HAS_DMC(dev_priv))
- return -ENODEV;
-
- dmc = &dev_priv->dmc;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
- seq_printf(m, "path: %s\n", dmc->fw_path);
- seq_printf(m, "Pipe A fw support: %s\n",
- yesno(GRAPHICS_VER(dev_priv) >= 12));
- seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
- seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
- seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));
-
- if (!intel_dmc_has_payload(dev_priv))
- goto out;
-
- seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
-
- if (DISPLAY_VER(dev_priv) >= 12) {
- if (IS_DGFX(dev_priv)) {
- dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
- } else {
- dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
- dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
- }
-
- /*
- * NOTE: DMC_DEBUG3 is a general purpose reg.
- * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
- * reg for DC3CO debugging and validation,
- * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
- */
- seq_printf(m, "DC3CO count: %d\n", intel_de_read(dev_priv, IS_DGFX(dev_priv) ?
- DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
- } else {
- dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
- SKL_DMC_DC3_DC5_COUNT;
- if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
- dc6_reg = SKL_DMC_DC5_DC6_COUNT;
- }
-
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- intel_de_read(dev_priv, dc5_reg));
- if (dc6_reg.reg)
- seq_printf(m, "DC5 -> DC6 count: %d\n",
- intel_de_read(dev_priv, dc6_reg));
-
-out:
- seq_printf(m, "program base: 0x%08x\n",
- intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
- seq_printf(m, "ssp base: 0x%08x\n",
- intel_de_read(dev_priv, DMC_SSP_BASE));
- seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
static void intel_seq_print_mode(struct seq_file *m, int tabs,
const struct drm_display_mode *mode)
{
@@ -537,11 +472,18 @@ static void intel_encoder_info(struct seq_file *m,
drm_connector_list_iter_end(&conn_iter);
}
-static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+static void intel_panel_info(struct seq_file *m,
+ struct intel_connector *connector)
{
- const struct drm_display_mode *mode = panel->fixed_mode;
+ const struct drm_display_mode *fixed_mode;
+
+ if (list_empty(&connector->panel.fixed_modes))
+ return;
- seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+ seq_puts(m, "\tfixed modes:\n");
+
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head)
+ intel_seq_print_mode(m, 2, fixed_mode);
}
static void intel_hdcp_info(struct seq_file *m,
@@ -577,9 +519,8 @@ static void intel_dp_info(struct seq_file *m,
const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
- seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
- if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
- intel_panel_info(m, &intel_connector->panel);
+ seq_printf(m, "\taudio support: %s\n",
+ str_yes_no(intel_dp->has_audio));
drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
edid ? edid->data : NULL, &intel_dp->aux);
@@ -590,7 +531,7 @@ static void intel_dp_mst_info(struct seq_file *m,
{
bool has_audio = intel_connector->port->has_audio;
- seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+ seq_printf(m, "\taudio support: %s\n", str_yes_no(has_audio));
}
static void intel_hdmi_info(struct seq_file *m,
@@ -599,13 +540,8 @@ static void intel_hdmi_info(struct seq_file *m,
struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
- seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
-}
-
-static void intel_lvds_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- intel_panel_info(m, &intel_connector->panel);
+ seq_printf(m, "\taudio support: %s\n",
+ str_yes_no(intel_hdmi->has_audio));
}
static void intel_connector_info(struct seq_file *m,
@@ -642,10 +578,6 @@ static void intel_connector_info(struct seq_file *m,
else
intel_dp_info(m, intel_connector);
break;
- case DRM_MODE_CONNECTOR_LVDS:
- if (encoder->type == INTEL_OUTPUT_LVDS)
- intel_lvds_info(m, intel_connector);
- break;
case DRM_MODE_CONNECTOR_HDMIA:
if (encoder->type == INTEL_OUTPUT_HDMI ||
encoder->type == INTEL_OUTPUT_DDI)
@@ -658,6 +590,8 @@ static void intel_connector_info(struct seq_file *m,
seq_puts(m, "\tHDCP version: ");
intel_hdcp_info(m, intel_connector);
+ intel_panel_info(m, intel_connector);
+
seq_printf(m, "\tmodes:\n");
list_for_each_entry(mode, &connector->modes, head)
intel_seq_print_mode(m, 2, mode);
@@ -757,7 +691,7 @@ static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
fb->base.id, &fb->format->format,
fb->modifier, fb->width, fb->height,
- yesno(plane_state->uapi.visible),
+ str_yes_no(plane_state->uapi.visible),
DRM_RECT_FP_ARG(&plane_state->uapi.src),
DRM_RECT_ARG(&plane_state->uapi.dst),
rot_str);
@@ -796,7 +730,7 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
&crtc_state->scaler_state.scalers[i];
seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
- i, yesno(sc->in_use), sc->mode);
+ i, str_yes_no(sc->in_use), sc->mode);
}
seq_puts(m, "\n");
} else {
@@ -919,24 +853,24 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
crtc->base.base.id, crtc->base.name);
seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
- yesno(crtc_state->uapi.enable),
- yesno(crtc_state->uapi.active),
+ str_yes_no(crtc_state->uapi.enable),
+ str_yes_no(crtc_state->uapi.active),
DRM_MODE_ARG(&crtc_state->uapi.mode));
seq_printf(m, "\thw: enable=%s, active=%s\n",
- yesno(crtc_state->hw.enable), yesno(crtc_state->hw.active));
+ str_yes_no(crtc_state->hw.enable), str_yes_no(crtc_state->hw.active));
seq_printf(m, "\tadjusted_mode=" DRM_MODE_FMT "\n",
DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
seq_printf(m, "\tpipe__mode=" DRM_MODE_FMT "\n",
DRM_MODE_ARG(&crtc_state->hw.pipe_mode));
- seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
- crtc_state->pipe_src_w, crtc_state->pipe_src_h,
- yesno(crtc_state->dither), crtc_state->pipe_bpp);
+ seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n",
+ DRM_RECT_ARG(&crtc_state->pipe_src),
+ str_yes_no(crtc_state->dither), crtc_state->pipe_bpp);
intel_scaler_info(m, crtc);
- if (crtc_state->bigjoiner)
+ if (crtc_state->bigjoiner_pipes)
seq_printf(m, "\tLinked to 0x%x pipes as a %s\n",
crtc_state->bigjoiner_pipes,
intel_crtc_is_bigjoiner_slave(crtc_state) ? "slave" : "master");
@@ -948,8 +882,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
intel_plane_info(m, crtc);
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
- yesno(!crtc->cpu_fifo_underrun_disabled),
- yesno(!crtc->pch_fifo_underrun_disabled));
+ str_yes_no(!crtc->cpu_fifo_underrun_disabled),
+ str_yes_no(!crtc->pch_fifo_underrun_disabled));
crtc_updates_info(m, crtc, "\t");
}
@@ -1005,7 +939,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
- pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
+ pll->state.pipe_mask, pll->active_mask,
+ str_yes_no(pll->on));
seq_printf(m, " tracked hardware state:\n");
seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
seq_printf(m, " dpll_md: 0x%08x\n",
@@ -1047,7 +982,7 @@ static int i915_ipc_status_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
seq_printf(m, "Isochronous Priority Control: %s\n",
- yesno(dev_priv->ipc_enabled));
+ str_yes_no(dev_priv->ipc_enabled));
return 0;
}
@@ -1117,13 +1052,13 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
for_each_plane_id_on_crtc(crtc, plane_id) {
- entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ entry = &crtc_state->wm.skl.plane_ddb[plane_id];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
entry->start, entry->end,
skl_ddb_entry_size(entry));
}
- entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ entry = &crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
@@ -1133,97 +1068,48 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
return 0;
}
-static void drrs_status_per_crtc(struct seq_file *m,
- struct drm_device *dev,
- struct intel_crtc *crtc)
+static int i915_drrs_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_drrs *drrs = &dev_priv->drrs;
- int vrefresh = 0;
- struct drm_connector *connector;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ struct intel_crtc *crtc;
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- bool supported = false;
-
- if (connector->state->crtc != &crtc->base)
- continue;
-
- seq_printf(m, "%s:\n", connector->name);
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
- drrs->type == SEAMLESS_DRRS_SUPPORT)
- supported = true;
-
- seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ seq_printf(m, "[CONNECTOR:%d:%s] DRRS type: %s\n",
+ connector->base.base.id, connector->base.name,
+ intel_drrs_type_str(intel_panel_drrs_type(connector)));
}
drm_connector_list_iter_end(&conn_iter);
seq_puts(m, "\n");
- if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
- struct intel_panel *panel;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
- mutex_lock(&drrs->mutex);
- /* DRRS Supported */
- seq_puts(m, "\tDRRS Enabled: Yes\n");
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
- /* disable_drrs() will make drrs->dp NULL */
- if (!drrs->dp) {
- seq_puts(m, "Idleness DRRS: Disabled\n");
- mutex_unlock(&drrs->mutex);
- return;
- }
+ mutex_lock(&crtc->drrs.mutex);
- panel = &drrs->dp->attached_connector->panel;
- seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
- drrs->busy_frontbuffer_bits);
-
- seq_puts(m, "\n\t\t");
- if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
- seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
- vrefresh = drm_mode_vrefresh(panel->fixed_mode);
- } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
- seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
- vrefresh = drm_mode_vrefresh(panel->downclock_mode);
- } else {
- seq_printf(m, "DRRS_State: Unknown(%d)\n",
- drrs->refresh_rate_type);
- mutex_unlock(&drrs->mutex);
- return;
- }
- seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+ /* DRRS enabled in this crtc state? */

+ seq_printf(m, "\tDRRS Enabled: %s\n",
+ str_yes_no(crtc_state->has_drrs));
- seq_puts(m, "\n\t\t");
- mutex_unlock(&drrs->mutex);
- } else {
- /* DRRS not supported. Print the VBT parameter */
- seq_puts(m, "\tDRRS Enabled : No");
- }
- seq_puts(m, "\n");
-}
+ seq_printf(m, "\tDRRS Active: %s\n",
+ str_yes_no(intel_drrs_is_active(crtc)));
-static int i915_drrs_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
- int active_crtc_cnt = 0;
+ seq_printf(m, "\tBusy_frontbuffer_bits: 0x%X\n",
+ crtc->drrs.busy_frontbuffer_bits);
- drm_modeset_lock_all(dev);
- for_each_intel_crtc(dev, crtc) {
- if (crtc->base.state->active) {
- active_crtc_cnt++;
- seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+ seq_printf(m, "\tDRRS refresh rate: %s\n",
+ crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ?
+ "low" : "high");
- drrs_status_per_crtc(m, dev, crtc);
- }
+ mutex_unlock(&crtc->drrs.mutex);
}
- drm_modeset_unlock_all(dev);
-
- if (!active_crtc_cnt)
- seq_puts(m, "No active crtc found\n");
return 0;
}
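
The rewritten i915_drrs_status follows the standard seq_file show-callback shape. A hedged, generic sketch of that pattern (foo_status is a placeholder name, not a symbol from this patch):

/*
 * DEFINE_SHOW_ATTRIBUTE(foo_status) generates foo_status_fops
 * around this callback for use with debugfs_create_file().
 */
static int foo_status_show(struct seq_file *m, void *unused)
{
	seq_printf(m, "Enabled: %s\n", str_yes_no(true));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_status);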
@@ -1259,7 +1145,7 @@ static int i915_lpsp_status(struct seq_file *m, void *unused)
return 0;
}
- seq_printf(m, "LPSP: %s\n", enableddisabled(lpsp_enabled));
+ seq_printf(m, "LPSP: %s\n", str_enabled_disabled(lpsp_enabled));
return 0;
}
@@ -1740,7 +1626,7 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
- yesno(delayed_work_pending(&hotplug->reenable_work)));
+ str_yes_no(delayed_work_pending(&hotplug->reenable_work)));
return 0;
}
@@ -1814,7 +1700,7 @@ static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
seq_printf(m, "Enabled: %s\n",
- yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+ str_yes_no(dev_priv->hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1888,13 +1774,8 @@ static int i915_drrs_ctl_set(void *data, u64 val)
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
- if (DISPLAY_VER(dev_priv) < 7)
- return -ENODEV;
-
for_each_intel_crtc(dev, crtc) {
- struct drm_connector_list_iter conn_iter;
struct intel_crtc_state *crtc_state;
- struct drm_connector *connector;
struct drm_crtc_commit *commit;
int ret;
@@ -1915,30 +1796,13 @@ static int i915_drrs_ctl_set(void *data, u64 val)
goto out;
}
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp;
-
- if (!(crtc_state->uapi.connector_mask &
- drm_connector_mask(connector)))
- continue;
-
- encoder = intel_attached_encoder(to_intel_connector(connector));
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- drm_dbg(&dev_priv->drm,
- "Manually %sabling DRRS. %llu\n",
- val ? "en" : "dis", val);
+ drm_dbg(&dev_priv->drm,
+ "Manually %sactivating DRRS\n", val ? "" : "de");
- intel_dp = enc_to_intel_dp(encoder);
- if (val)
- intel_drrs_enable(intel_dp, crtc_state);
- else
- intel_drrs_disable(intel_dp, crtc_state);
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (val)
+ intel_drrs_activate(crtc_state);
+ else
+ intel_drrs_deactivate(crtc_state);
out:
drm_modeset_unlock(&crtc->base.mutex);
@@ -2020,7 +1884,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
- {"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
@@ -2064,6 +1927,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
+ intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
}
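
i915_dmc_info moves out of the shared drm_info_list into a feature-local registration helper, mirroring intel_fbc_debugfs_register(). Such helpers typically look like the sketch below (the intel_foo_* names are illustrative only, not the actual intel_dmc symbols):

void intel_foo_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	/* One file per feature under the DRM minor's debugfs dir. */
	debugfs_create_file("i915_foo_info", 0444, minor->debugfs_root,
			    i915, &foo_status_fops);
}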
@@ -2209,14 +2073,14 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
intel_dp = intel_attached_dp(to_intel_connector(connector));
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "DSC_Enabled: %s\n",
- yesno(crtc_state->dsc.compression_enable));
+ str_yes_no(crtc_state->dsc.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
- yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ str_yes_no(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
seq_printf(m, "Force_DSC_Enable: %s\n",
- yesno(intel_dp->force_dsc_en));
+ str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
seq_printf(m, "FEC_Sink_Support: %s\n",
- yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+ str_yes_no(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
} while (try_again);
drm_modeset_drop_locks(&ctx);
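
The do/while (try_again) loop in i915_dsc_fec_support_show is the usual drm_modeset_acquire_ctx backoff dance. DRM also provides macros expressing the same retry-on-EDEADLK pattern; a hedged sketch:

struct drm_modeset_acquire_ctx ctx;
int ret = 0;

/* Re-runs the body if any lock acquisition returns -EDEADLK. */
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

/* ... inspect crtc/connector state under the locks ... */

DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);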
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 9ebae7ac3235..949edc983a16 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,123 +3,32 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
-#include "intel_combo_phy_regs.h"
-#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_power_map.h"
+#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
-#include "intel_dpio_phy.h"
-#include "intel_dpll.h"
-#include "intel_hotplug.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
-#include "intel_pps.h"
#include "intel_snps_phy.h"
-#include "intel_tc.h"
-#include "intel_vga.h"
#include "vlv_sideband.h"
-struct i915_power_well_ops {
- /*
- * Synchronize the well's hw state to match the current sw state, for
- * example enable/disable it based on the current refcount. Called
- * during driver init and resume time, possibly after first calling
- * the enable/disable handlers.
- */
- void (*sync_hw)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /*
- * Enable the well and resources that depend on it (for example
- * interrupts located on the well). Called after the 0->1 refcount
- * transition.
- */
- void (*enable)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /*
- * Disable the well and resources that depend on it. Called after
- * the 1->0 refcount transition.
- */
- void (*disable)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /* Returns the hw enabled state. */
- bool (*is_enabled)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
-};
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if(test_bit((__domain), (__power_well)->domains.bits))
-struct i915_power_well_regs {
- i915_reg_t bios;
- i915_reg_t driver;
- i915_reg_t kvmr;
- i915_reg_t debug;
-};
-
-/* Power well structure for haswell */
-struct i915_power_well_desc {
- const char *name;
- bool always_on;
- u64 domains;
- /* unique identifier for this power well */
- enum i915_power_well_id id;
- /*
- * Arbitrary data associated with this power well. Platform and power
- * well specific.
- */
- union {
- struct {
- /*
- * request/status flag index in the PUNIT power well
- * control/status registers.
- */
- u8 idx;
- } vlv;
- struct {
- enum dpio_phy phy;
- } bxt;
- struct {
- const struct i915_power_well_regs *regs;
- /*
- * request/status flag index in the power well
- * control/status registers.
- */
- u8 idx;
- /* Mask of pipes whose IRQ logic is backed by the pw */
- u8 irq_pipe_mask;
- /*
- * Instead of waiting for the status bit to ack enables,
- * just wait a specific amount of time and then consider
- * the well enabled.
- */
- u16 fixed_enable_delay;
- /* The pw is backing the VGA functionality */
- bool has_vga:1;
- bool has_fuses:1;
- /*
- * The pw is for an ICL+ TypeC PHY port in
- * Thunderbolt mode.
- */
- bool is_tc_tbt:1;
- } hsw;
- };
- const struct i915_power_well_ops *ops;
-};
-
-struct i915_power_well {
- const struct i915_power_well_desc *desc;
- /* power well enable/disable usage count */
- int count;
- /* cached hw enabled state */
- bool hw_enabled;
-};
-
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id);
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
+ for_each_power_well_reverse(__dev_priv, __power_well) \
+ for_each_if(test_bit((__domain), (__power_well)->domains.bits))
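
These iterator macros reflect the move from a u64 domain mask to a real bitmap in power_well->domains.bits, lifting the 64-domain ceiling. A minimal sketch of the underlying idiom, assuming POWER_DOMAIN_NUM domains:

struct i915_power_domain_mask {
	DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
};

/*
 * for_each_if(cond) expands to "if (!(cond)) {} else", which lets
 * iterator macros filter elements without dangling-else problems.
 */
static bool well_has_domain(const struct i915_power_domain_mask *mask,
			    enum intel_display_power_domain domain)
{
	return test_bit(domain, mask->bits);
}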
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
@@ -135,14 +44,14 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PIPE_C";
case POWER_DOMAIN_PIPE_D:
return "PIPE_D";
- case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
- return "PIPE_A_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
- return "PIPE_B_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
- return "PIPE_C_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
- return "PIPE_D_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
+ return "PIPE_PANEL_FITTER_A";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
+ return "PIPE_PANEL_FITTER_B";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
+ return "PIPE_PANEL_FITTER_C";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
+ return "PIPE_PANEL_FITTER_D";
case POWER_DOMAIN_TRANSCODER_A:
return "TRANSCODER_A";
case POWER_DOMAIN_TRANSCODER_B:
@@ -153,48 +62,60 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_D";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
- case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
- return "TRANSCODER_VDSC_PW2";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
return "TRANSCODER_DSI_C";
- case POWER_DOMAIN_PORT_DDI_A_LANES:
- return "PORT_DDI_A_LANES";
- case POWER_DOMAIN_PORT_DDI_B_LANES:
- return "PORT_DDI_B_LANES";
- case POWER_DOMAIN_PORT_DDI_C_LANES:
- return "PORT_DDI_C_LANES";
- case POWER_DOMAIN_PORT_DDI_D_LANES:
- return "PORT_DDI_D_LANES";
- case POWER_DOMAIN_PORT_DDI_E_LANES:
- return "PORT_DDI_E_LANES";
- case POWER_DOMAIN_PORT_DDI_F_LANES:
- return "PORT_DDI_F_LANES";
- case POWER_DOMAIN_PORT_DDI_G_LANES:
- return "PORT_DDI_G_LANES";
- case POWER_DOMAIN_PORT_DDI_H_LANES:
- return "PORT_DDI_H_LANES";
- case POWER_DOMAIN_PORT_DDI_I_LANES:
- return "PORT_DDI_I_LANES";
- case POWER_DOMAIN_PORT_DDI_A_IO:
- return "PORT_DDI_A_IO";
- case POWER_DOMAIN_PORT_DDI_B_IO:
- return "PORT_DDI_B_IO";
- case POWER_DOMAIN_PORT_DDI_C_IO:
- return "PORT_DDI_C_IO";
- case POWER_DOMAIN_PORT_DDI_D_IO:
- return "PORT_DDI_D_IO";
- case POWER_DOMAIN_PORT_DDI_E_IO:
- return "PORT_DDI_E_IO";
- case POWER_DOMAIN_PORT_DDI_F_IO:
- return "PORT_DDI_F_IO";
- case POWER_DOMAIN_PORT_DDI_G_IO:
- return "PORT_DDI_G_IO";
- case POWER_DOMAIN_PORT_DDI_H_IO:
- return "PORT_DDI_H_IO";
- case POWER_DOMAIN_PORT_DDI_I_IO:
- return "PORT_DDI_I_IO";
+ case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
+ return "TRANSCODER_VDSC_PW2";
+ case POWER_DOMAIN_PORT_DDI_LANES_A:
+ return "PORT_DDI_LANES_A";
+ case POWER_DOMAIN_PORT_DDI_LANES_B:
+ return "PORT_DDI_LANES_B";
+ case POWER_DOMAIN_PORT_DDI_LANES_C:
+ return "PORT_DDI_LANES_C";
+ case POWER_DOMAIN_PORT_DDI_LANES_D:
+ return "PORT_DDI_LANES_D";
+ case POWER_DOMAIN_PORT_DDI_LANES_E:
+ return "PORT_DDI_LANES_E";
+ case POWER_DOMAIN_PORT_DDI_LANES_F:
+ return "PORT_DDI_LANES_F";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC1:
+ return "PORT_DDI_LANES_TC1";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC2:
+ return "PORT_DDI_LANES_TC2";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC3:
+ return "PORT_DDI_LANES_TC3";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC4:
+ return "PORT_DDI_LANES_TC4";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC5:
+ return "PORT_DDI_LANES_TC5";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC6:
+ return "PORT_DDI_LANES_TC6";
+ case POWER_DOMAIN_PORT_DDI_IO_A:
+ return "PORT_DDI_IO_A";
+ case POWER_DOMAIN_PORT_DDI_IO_B:
+ return "PORT_DDI_IO_B";
+ case POWER_DOMAIN_PORT_DDI_IO_C:
+ return "PORT_DDI_IO_C";
+ case POWER_DOMAIN_PORT_DDI_IO_D:
+ return "PORT_DDI_IO_D";
+ case POWER_DOMAIN_PORT_DDI_IO_E:
+ return "PORT_DDI_IO_E";
+ case POWER_DOMAIN_PORT_DDI_IO_F:
+ return "PORT_DDI_IO_F";
+ case POWER_DOMAIN_PORT_DDI_IO_TC1:
+ return "PORT_DDI_IO_TC1";
+ case POWER_DOMAIN_PORT_DDI_IO_TC2:
+ return "PORT_DDI_IO_TC2";
+ case POWER_DOMAIN_PORT_DDI_IO_TC3:
+ return "PORT_DDI_IO_TC3";
+ case POWER_DOMAIN_PORT_DDI_IO_TC4:
+ return "PORT_DDI_IO_TC4";
+ case POWER_DOMAIN_PORT_DDI_IO_TC5:
+ return "PORT_DDI_IO_TC5";
+ case POWER_DOMAIN_PORT_DDI_IO_TC6:
+ return "PORT_DDI_IO_TC6";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -219,28 +140,32 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_E";
case POWER_DOMAIN_AUX_F:
return "AUX_F";
- case POWER_DOMAIN_AUX_G:
- return "AUX_G";
- case POWER_DOMAIN_AUX_H:
- return "AUX_H";
- case POWER_DOMAIN_AUX_I:
- return "AUX_I";
+ case POWER_DOMAIN_AUX_USBC1:
+ return "AUX_USBC1";
+ case POWER_DOMAIN_AUX_USBC2:
+ return "AUX_USBC2";
+ case POWER_DOMAIN_AUX_USBC3:
+ return "AUX_USBC3";
+ case POWER_DOMAIN_AUX_USBC4:
+ return "AUX_USBC4";
+ case POWER_DOMAIN_AUX_USBC5:
+ return "AUX_USBC5";
+ case POWER_DOMAIN_AUX_USBC6:
+ return "AUX_USBC6";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
- case POWER_DOMAIN_AUX_C_TBT:
- return "AUX_C_TBT";
- case POWER_DOMAIN_AUX_D_TBT:
- return "AUX_D_TBT";
- case POWER_DOMAIN_AUX_E_TBT:
- return "AUX_E_TBT";
- case POWER_DOMAIN_AUX_F_TBT:
- return "AUX_F_TBT";
- case POWER_DOMAIN_AUX_G_TBT:
- return "AUX_G_TBT";
- case POWER_DOMAIN_AUX_H_TBT:
- return "AUX_H_TBT";
- case POWER_DOMAIN_AUX_I_TBT:
- return "AUX_I_TBT";
+ case POWER_DOMAIN_AUX_TBT1:
+ return "AUX_TBT1";
+ case POWER_DOMAIN_AUX_TBT2:
+ return "AUX_TBT2";
+ case POWER_DOMAIN_AUX_TBT3:
+ return "AUX_TBT3";
+ case POWER_DOMAIN_AUX_TBT4:
+ return "AUX_TBT4";
+ case POWER_DOMAIN_AUX_TBT5:
+ return "AUX_TBT5";
+ case POWER_DOMAIN_AUX_TBT6:
+ return "AUX_TBT6";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -259,40 +184,6 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
}
}
-static void intel_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
- power_well->desc->ops->enable(dev_priv, power_well);
- power_well->hw_enabled = true;
-}
-
-static void intel_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
- power_well->hw_enabled = false;
- power_well->desc->ops->disable(dev_priv, power_well);
-}
-
-static void intel_power_well_get(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
-}
-
-static void intel_power_well_put(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- drm_WARN(&dev_priv->drm, !power_well->count,
- "Use count on power well %s is already zero",
- power_well->desc->name);
-
- if (!--power_well->count)
- intel_power_well_disable(dev_priv, power_well);
-}
-
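
These get/put helpers move out of this file as part of the intel_display_power_well.c split; later hunks call the exported intel_power_well_enable()/intel_power_well_disable() instead. The pattern is the classic enable-on-first-user, disable-on-last-user refcount, generalized here as a sketch:

/* Enable the backing resource on the 0->1 transition only. */
static void resource_get(struct resource_ref *res)
{
	if (!res->count++)
		resource_enable(res);
}

/* Disable again on the 1->0 transition; going below zero is a bug. */
static void resource_put(struct resource_ref *res)
{
	WARN_ON(!res->count);
	if (!--res->count)
		resource_disable(res);
}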
/**
* __intel_display_power_is_enabled - unlocked check for a power domain
* @dev_priv: i915 device instance
@@ -316,11 +207,11 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
is_enabled = true;
- for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
- if (power_well->desc->always_on)
+ for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
+ if (intel_power_well_is_always_on(power_well))
continue;
- if (!power_well->hw_enabled) {
+ if (!intel_power_well_is_enabled_cached(power_well)) {
is_enabled = false;
break;
}
@@ -361,634 +252,6 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
return ret;
}
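
intel_display_power_is_enabled() only answers a point-in-time query; code that needs a domain to stay powered takes a reference instead. A hedged usage sketch of the refcounted API:

intel_wakeref_t wakeref;

/* Powers up every well backing the domain and keeps it up. */
wakeref = intel_display_power_get(i915, POWER_DOMAIN_PIPE_A);

/* ... access registers that live behind the domain ... */

/* Drop the reference; wells may power down asynchronously. */
intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);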
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
- u8 irq_pipe_mask, bool has_vga)
-{
- if (has_vga)
- intel_vga_reset_io_mem(dev_priv);
-
- if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
-}
-
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
- u8 irq_pipe_mask)
-{
- if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
-}
-
-#define ICL_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
-
-#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
-
-static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
-{
- int pw_idx = power_well->desc->hsw.idx;
-
- return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
- ICL_AUX_PW_TO_CH(pw_idx);
-}
-
-static struct intel_digital_port *
-aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
- enum aux_ch aux_ch)
-{
- struct intel_digital_port *dig_port = NULL;
- struct intel_encoder *encoder;
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- /* We'll check the MST primary port */
- if (encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- dig_port = enc_to_dig_port(encoder);
- if (!dig_port)
- continue;
-
- if (dig_port->aux_ch != aux_ch) {
- dig_port = NULL;
- continue;
- }
-
- break;
- }
-
- return dig_port;
-}
-
-static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
- const struct i915_power_well *power_well)
-{
- enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
-
- return intel_port_to_phy(i915, dig_port->base.port);
-}
-
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool timeout_expected)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- int enable_delay = power_well->desc->hsw.fixed_enable_delay;
-
- /*
- * For some power wells we're not supposed to watch the status bit for
- * an ack, but rather just wait a fixed amount of time and then
- * proceed. This is only used on DG2.
- */
- if (IS_DG2(dev_priv) && enable_delay) {
- usleep_range(enable_delay, 2 * enable_delay);
- return;
- }
-
- /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- if (intel_de_wait_for_set(dev_priv, regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
- drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
- power_well->desc->name);
-
- drm_WARN_ON(&dev_priv->drm, !timeout_expected);
-
- }
-}
-
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
- const struct i915_power_well_regs *regs,
- int pw_idx)
-{
- u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 ret;
-
- ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
- ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
- if (regs->kvmr.reg)
- ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
- ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
-
- return ret;
-}
-
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- bool disabled;
- u32 reqs;
-
- /*
- * Bspec doesn't require waiting for PWs to get disabled, but we still do
- * this for paranoia. The known cases where a PW will be forced on:
- * - a KVMR request on any power well via the KVMR request register
- * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
- * DEBUG request registers
- * Skip the wait in case any of the request bits are set and print a
- * diagnostic message.
- */
- wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
- HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
- if (disabled)
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
- power_well->desc->name,
- !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
-}
-
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
- enum skl_power_gate pg)
-{
- /* Timeout 5us for PG#0, for other PGs 1us */
- drm_WARN_ON(&dev_priv->drm,
- intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 val;
-
- if (power_well->desc->hsw.has_fuses) {
- enum skl_power_gate pg;
-
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
-
- /* Wa_16013190616:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
-
- /*
- * For PW1 we have to wait both for the PW0/PG0 fuse state
- * before enabling the power well and PW1/PG1's own fuse
- * state after the enabling. For all other power wells with
- * fuses we only have to wait for that PW/PG's fuse state
- * after the enabling.
- */
- if (pg == SKL_PG1)
- gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
- }
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
-
- if (power_well->desc->hsw.has_fuses) {
- enum skl_power_gate pg;
-
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
- gen9_wait_for_power_well_fuses(dev_priv, pg);
- }
-
- hsw_power_well_post_enable(dev_priv,
- power_well->desc->hsw.irq_pipe_mask,
- power_well->desc->hsw.has_vga);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 val;
-
- hsw_power_well_pre_disable(dev_priv,
- power_well->desc->hsw.irq_pipe_mask);
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_disable(dev_priv, power_well);
-}
-
-static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
-
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- if (DISPLAY_VER(dev_priv) < 12) {
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val | ICL_LANE_ENABLE_AUX);
- }
-
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
-
- /* Display WA #1178: icl */
- if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
- val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
- }
-}
-
-static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
-
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
-
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val & ~ICL_LANE_ENABLE_AUX);
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- hsw_wait_for_power_well_disable(dev_priv, power_well);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
-
-static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int refs = hweight64(power_well->desc->domains &
- async_put_domains_mask(&dev_priv->power_domains));
-
- drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
-
- return refs;
-}
-
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- struct intel_digital_port *dig_port)
-{
- /* Bypass the check if all references are released asynchronously */
- if (power_well_async_ref_count(dev_priv, power_well) ==
- power_well->count)
- return;
-
- if (drm_WARN_ON(&dev_priv->drm, !dig_port))
- return;
-
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
- return;
-
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
-}
-
-#else
-
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- struct intel_digital_port *dig_port)
-{
-}
-
-#endif
-
-#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
-
-static void icl_tc_cold_exit(struct drm_i915_private *i915)
-{
- int ret, tries = 0;
-
- while (1) {
- ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0,
- 250, 1);
- if (ret != -EAGAIN || ++tries == 3)
- break;
- msleep(1);
- }
-
- /* Spec states that TC cold exit can take up to 1ms to complete */
- if (!ret)
- msleep(1);
-
- /* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
- drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
- "succeeded");
-}
-
-static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
- bool timeout_expected;
- u32 val;
-
- icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
-
- val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
- val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
- val |= DP_AUX_CH_CTL_TBT_IO;
- intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
-
- /*
- * An AUX timeout is expected if the TBT DP tunnel is down,
- * or when we need to enable AUX on a legacy TypeC port as part of the TC-cold
- * exit sequence.
- */
- timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
- icl_tc_cold_exit(dev_priv);
-
- hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
-
- if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
- enum tc_port tc_port;
-
- tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x2));
-
- if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
- DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(&dev_priv->drm,
- "Timeout waiting TC uC health\n");
- }
-}
-
-static void
-icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
-
- icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
-
- hsw_power_well_disable(dev_priv, power_well);
-}
-
-static void
-icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
-
- if (intel_phy_is_tc(dev_priv, phy))
- return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_enable(dev_priv,
- power_well);
- else
- return hsw_power_well_enable(dev_priv, power_well);
-}
-
-static void
-icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
-
- if (intel_phy_is_tc(dev_priv, phy))
- return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_disable(dev_priv,
- power_well);
- else
- return hsw_power_well_disable(dev_priv, power_well);
-}
-
-/*
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- enum i915_power_well_id id = power_well->desc->id;
- int pw_idx = power_well->desc->hsw.idx;
- u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
- HSW_PWR_WELL_CTL_STATE(pw_idx);
- u32 val;
-
- val = intel_de_read(dev_priv, regs->driver);
-
- /*
- * On GEN9 big core due to a DMC bug the driver's request bits for PW1
- * and the MISC_IO PW will not be restored, so check instead for the
- * BIOS's own request bits, which are forced-on for these power wells
- * when exiting DC5/6.
- */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
- (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= intel_de_read(dev_priv, regs->bios);
-
- return (val & mask) == mask;
-}
-
-static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
-{
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
- HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
- "Power well 2 on.\n");
- drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
-
- /*
- * TODO: check for the following to verify the conditions to enter DC9
- * state are satisfied:
- * 1] Check relevant display engine registers to verify if mode set
- * disable sequence was followed.
- * 2] Check if display uninitialize sequence is initialized.
- */
-}
-
-static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
-{
- drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
-
- /*
- * TODO: check for the following to verify DC9 state was indeed
- * entered before programming to disable it:
- * 1] Check relevant display engine registers to verify if mode
- * set disable sequence was followed.
- * 2] Check if display uninitialize sequence is initialized.
- */
-}
-
-static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
- u32 state)
-{
- int rewrites = 0;
- int rereads = 0;
- u32 v;
-
- intel_de_write(dev_priv, DC_STATE_EN, state);
-
- /* It has been observed that disabling the dc6 state sometimes
- * doesn't stick and the DMC keeps returning the old value. Make sure
- * the write really sticks enough times and also force rewrite until
- * we are confident that state is exactly what we want.
- */
- do {
- v = intel_de_read(dev_priv, DC_STATE_EN);
-
- if (v != state) {
- intel_de_write(dev_priv, DC_STATE_EN, state);
- rewrites++;
- rereads = 0;
- } else if (rereads++ > 5) {
- break;
- }
-
- } while (rewrites < 100);
-
- if (v != state)
- drm_err(&dev_priv->drm,
- "Writing dc state to 0x%x failed, now 0x%x\n",
- state, v);
-
- /* Most of the time we need one retry; avoid spam */
- if (rewrites > 1)
- drm_dbg_kms(&dev_priv->drm,
- "Rewrote dc state to 0x%x %d times\n",
- state, rewrites);
-}
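
gen9_write_dc_state() is a write-verify-retry loop: the DMC firmware can race with the CPU write, so the value is re-read and rewritten until it sticks. The defensive idiom, reduced to a skeleton (read_reg/write_reg are placeholders):

/* Bounded retries so a genuinely stuck register can't hang us. */
static int write_verified(u32 reg, u32 want)
{
	int tries;

	for (tries = 0; tries < 100; tries++) {
		write_reg(reg, want);
		if (read_reg(reg) == want)
			return 0;	/* value stuck */
	}
	return -EIO;			/* hardware ignored us */
}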
-
-static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
-{
- u32 mask;
-
- mask = DC_STATE_EN_UPTO_DC5;
-
- if (DISPLAY_VER(dev_priv) >= 12)
- mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
- | DC_STATE_EN_DC9;
- else if (DISPLAY_VER(dev_priv) == 11)
- mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- mask |= DC_STATE_EN_DC9;
- else
- mask |= DC_STATE_EN_UPTO_DC6;
-
- return mask;
-}
-
-static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm,
- "Resetting DC state tracking from %02x to %02x\n",
- dev_priv->dmc.dc_state, val);
- dev_priv->dmc.dc_state = val;
-}
-
-/**
- * gen9_set_dc_state - set target display C power state
- * @dev_priv: i915 device instance
- * @state: target DC power state
- * - DC_STATE_DISABLE
- * - DC_STATE_EN_UPTO_DC5
- * - DC_STATE_EN_UPTO_DC6
- * - DC_STATE_EN_DC9
- *
- * Signal to DMC firmware/HW the target DC power state passed in @state.
- * DMC/HW can turn off individual display clocks and power rails when entering
- * a deeper DC power state (higher in number) and turns these back when exiting
- * that state to a shallower power state (lower in number). The HW will decide
- * when to actually enter a given state on an on-demand basis, for instance
- * depending on the active state of display pipes. The state of display
- * registers backed by affected power rails is saved/restored as needed.
- *
- * Based on the above, entering a deeper DC power state is asynchronous wrt.
- * requesting it. Disabling a deeper power state is synchronous: for instance
- * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
- * back on and register state is restored. This is guaranteed by the MMIO write
- * to DC_STATE_EN blocking until the state is restored.
- */
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
-{
- u32 val;
- u32 mask;
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->dmc.allowed_dc_mask))
- state &= dev_priv->dmc.allowed_dc_mask;
-
- val = intel_de_read(dev_priv, DC_STATE_EN);
- mask = gen9_dc_mask(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
- val & mask, state);
-
- /* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->dmc.dc_state)
- drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->dmc.dc_state, val & mask);
-
- val &= ~mask;
- val |= state;
-
- gen9_write_dc_state(dev_priv, val);
-
- dev_priv->dmc.dc_state = val & mask;
-}
-
static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
u32 target_dc_state)
@@ -1014,88 +277,6 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
return target_dc_state;
}
-static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
-{
- drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
- gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
-}
-
-static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
- val = intel_de_read(dev_priv, DC_STATE_EN);
- val &= ~DC_STATE_DC3CO_STATUS;
- intel_de_write(dev_priv, DC_STATE_EN, val);
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- /*
- * Delay of 200us DC3CO Exit time B.Spec 49196
- */
- usleep_range(200, 210);
-}
-
-static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc9(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
- /*
- * Power sequencer reset is not needed on
- * platforms with South Display Engine on PCH,
- * because PPS registers are always on.
- */
- if (!HAS_PCH_SPLIT(dev_priv))
- intel_pps_reset_all(dev_priv);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
-}
-
-static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
-{
- assert_can_disable_dc9(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
-{
- drm_WARN_ONCE(&dev_priv->drm,
- !intel_de_read(dev_priv,
- DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
- "DMC program storage start is NULL\n");
- drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
- "DMC SSP Base Not fine\n");
- drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
- "DMC HTP Not fine\n");
-}
-
-static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id)
-{
- struct i915_power_well *power_well;
-
- for_each_power_well(dev_priv, power_well)
- if (power_well->desc->id == power_well_id)
- return power_well;
-
- /*
- * It's not feasible to add error checking code to the callers since
- * this condition really shouldn't happen and it doesn't even make sense
- * to abort things like display initialization sequences. Just return
- * the first power well and hope the WARN gets reported so we can fix
- * our driver.
- */
- drm_WARN(&dev_priv->drm, 1,
- "Power well %d not defined for this platform\n",
- power_well_id);
- return &dev_priv->power_domains.power_wells[0];
-}
-
/**
* intel_display_power_set_target_dc_state - Set target dc state.
* @dev_priv: i915 device
@@ -1123,934 +304,32 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (state == dev_priv->dmc.target_dc_state)
goto unlock;
- dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
- power_well);
+ dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
/*
* If DC off power well is disabled, need to enable and disable the
* DC off power well to effect target DC state.
*/
if (!dc_off_enabled)
- power_well->desc->ops->enable(dev_priv, power_well);
+ intel_power_well_enable(dev_priv, power_well);
dev_priv->dmc.target_dc_state = state;
if (!dc_off_enabled)
- power_well->desc->ops->disable(dev_priv, power_well);
+ intel_power_well_disable(dev_priv, power_well);
unlock:
mutex_unlock(&power_domains->lock);
}
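
Callers use this to retarget the DC state at runtime, e.g. when PSR switches between DC5/DC6 and DC3CO. A hedged example call:

/*
 * Prefer DC3CO from now on; the DC-off well is cycled internally
 * if needed so the new target takes effect immediately.
 */
intel_display_power_set_target_dc_state(i915, DC_STATE_EN_DC3CO);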
-static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
-{
- enum i915_power_well_id high_pg;
-
- /* Power wells at this level and above must be disabled for DC5 entry */
- if (DISPLAY_VER(dev_priv) == 12)
- high_pg = ICL_DISP_PW_3;
- else
- high_pg = SKL_DISP_PW_2;
-
- drm_WARN_ONCE(&dev_priv->drm,
- intel_display_power_well_is_enabled(dev_priv, high_pg),
- "Power wells above platform's DC5 limit still enabled.\n");
-
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC5),
- "DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
-
- assert_dmc_loaded(dev_priv);
-}
-
-static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc5(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
-
- /* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
-
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
-}
-
-static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
-{
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be enabled.\n");
-
- assert_dmc_loaded(dev_priv);
-}
-
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc6(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
-
- /* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
-
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = intel_de_read(dev_priv, regs->bios);
-
- /* Take over the request bit if set by BIOS. */
- if (bios_req & mask) {
- u32 drv_req = intel_de_read(dev_priv, regs->driver);
-
- if (!(drv_req & mask))
- intel_de_write(dev_priv, regs->driver, drv_req | mask);
- intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
- }
-}
-
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
-}
-
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *power_well;
-
- power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
- power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
- if (IS_GEMINILAKE(dev_priv)) {
- power_well = lookup_power_well(dev_priv,
- GLK_DISP_PW_DPIO_CMN_C);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- power_well->desc->bxt.phy);
- }
-}
-
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
- (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
-}
-
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
-{
- u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
-
- drm_WARN(&dev_priv->drm,
- hw_enabled_dbuf_slices != enabled_dbuf_slices,
- "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
- hw_enabled_dbuf_slices,
- enabled_dbuf_slices);
-}
-
-static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_config cdclk_config = {};
-
- if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
- tgl_disable_dc3co(dev_priv);
- return;
- }
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
- /* Can't read out voltage_level so can't use intel_cdclk_changed() */
- drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
- &cdclk_config));
-
- gen9_assert_dbuf_enabled(dev_priv);
-
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_verify_ddi_phy_power_wells(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- /*
- * DMC retains HW context only for port A, the other combo
- * PHY's HW context for port B is lost after DC transitions,
- * so we need to restore it manually.
- */
- intel_combo_phy_init(dev_priv);
-}
-
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- gen9_disable_dc_states(dev_priv);
-}
-
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (!intel_dmc_has_payload(dev_priv))
- return;
-
- switch (dev_priv->dmc.target_dc_state) {
- case DC_STATE_EN_DC3CO:
- tgl_enable_dc3co(dev_priv);
- break;
- case DC_STATE_EN_UPTO_DC6:
- skl_enable_dc6(dev_priv);
- break;
- case DC_STATE_EN_UPTO_DC5:
- gen9_enable_dc5(dev_priv);
- break;
- }
-}
-
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return true;
-}
-
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_A);
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_B);
-}
-
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- i830_disable_pipe(dev_priv, PIPE_B);
- i830_disable_pipe(dev_priv, PIPE_A);
-}
-
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
-}
-
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (power_well->count > 0)
- i830_pipes_power_well_enable(dev_priv, power_well);
- else
- i830_pipes_power_well_disable(dev_priv, power_well);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- int pw_idx = power_well->desc->vlv.idx;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(pw_idx);
- state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
- PUNIT_PWRGT_PWR_GATE(pw_idx);
-
- vlv_punit_get(dev_priv);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
- ctrl &= ~mask;
- ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
- if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
- "timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
- vlv_punit_put(dev_priv);
-}
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int pw_idx = power_well->desc->vlv.idx;
- bool enabled = false;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(pw_idx);
- ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
-
- vlv_punit_get(dev_priv);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
- state != PUNIT_PWRGT_PWR_GATE(pw_idx));
- if (state == ctrl)
- enabled = true;
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- drm_WARN_ON(&dev_priv->drm, ctrl != state);
-
- vlv_punit_put(dev_priv);
-
- return enabled;
-}
-
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- /*
- * On driver load, a pipe may be active and driving a DSI display.
- * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
- * (and never recovering) in this case. intel_dsi_post_disable() will
- * clear it when we turn off the display.
- */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
- val &= DPOUNIT_CLOCK_GATE_DISABLE;
- val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
-
- /*
- * Disable trickle feed and enable pnd deadline calculation
- */
- intel_de_write(dev_priv, MI_ARB_VLV,
- MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- intel_de_write(dev_priv, CBR1_VLV, 0);
-
- drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
- intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
- 1000));
-}
-
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- enum pipe pipe;
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection. Supposedly DSI also
- * needs the ref clock up and running.
- *
- * CHV DPLL B/C have some issues if VGA mode is enabled.
- */
- for_each_pipe(dev_priv, pipe) {
- u32 val = intel_de_read(dev_priv, DPLL(pipe));
-
- val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (pipe != PIPE_A)
- val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- intel_de_write(dev_priv, DPLL(pipe), val);
- }
-
- vlv_init_display_clock_gating(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
- * During driver initialization/resume we can avoid restoring the
- * part of the HW/SW state that will be inited anyway explicitly.
- */
- if (dev_priv->power_domains.initializing)
- return;
-
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
-
- /* Re-enable the ADPA, if we have one */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- if (encoder->type == INTEL_OUTPUT_ANALOG)
- intel_crt_reset(&encoder->base);
- }
-
- intel_vga_redisable_power_on(dev_priv);
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* make sure we're done processing display irqs */
- intel_synchronize_irq(dev_priv);
-
- intel_pps_reset_all(dev_priv);
-
- /* Prevent us from re-enabling polling on accident in late suspend */
- if (!dev_priv->drm.dev->power.is_suspended)
- intel_hpd_poll_enable(dev_priv);
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-
- vlv_display_power_well_init(dev_priv);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_display_power_well_deinit(dev_priv);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- /* since ref/cri clock was enabled */
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- assert_pll_disabled(dev_priv, pipe);
-
- /* Assert common reset */
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
-#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
-
-static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
- u32 phy_control = dev_priv->chv_phy_control;
- u32 phy_status = 0;
- u32 phy_status_mask = 0xffffffff;
-
- /*
- * The BIOS can leave the PHY in some weird state
- * where it doesn't fully power down some parts.
- * Disable the asserts until the PHY has been fully
- * reset (ie. the power well has been disabled at
- * least once).
- */
- if (!dev_priv->chv_phy_assert[DPIO_PHY0])
- phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
- PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
-
- if (!dev_priv->chv_phy_assert[DPIO_PHY1])
- phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
-
- if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- phy_status |= PHY_POWERGOOD(DPIO_PHY0);
-
- /* this assumes override is only used to enable lanes */
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
-
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
-
- /* CL1 is on whenever anything is on in either channel */
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
-
- /*
- * The DPLLB check accounts for the pipe B + port A usage
- * with CL2 powered up but all the lanes in the second channel
- * powered down.
- */
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
- (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
- }
-
- if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- phy_status |= PHY_POWERGOOD(DPIO_PHY1);
-
- /* this assumes override is only used to enable lanes */
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
- }
-
- phy_status &= phy_status_mask;
-
- /*
- * The PHY may be busy with some initial calibration and whatnot,
- * so the power state can take a while to actually change.
- */
- if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
- phy_status_mask, phy_status, 10))
- drm_err(&dev_priv->drm,
- "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
-}
-
-#undef BITS_SET
-
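The BITS_SET() macro removed above is stricter than a plain bitwise AND: it reports true only when every bit of the mask is present, which is what lets assert_chv_phy_status() reconstruct the expected PHY status from whole groups of override bits. A minimal standalone C sketch of the distinction, with a hypothetical control value:

#include <stdio.h>
#include <stdint.h>

/* True only when every bit of @bits is set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

int main(void)
{
	uint32_t ctl = 0x30;	/* hypothetical two-lane override group */

	printf("%d\n", BITS_SET(ctl, 0x30));	/* 1: whole group present */
	printf("%d\n", (ctl & 0xf0) != 0);	/* 1: plain AND accepts a partial match */
	printf("%d\n", BITS_SET(ctl, 0xf0));	/* 0: group only half set */
	return 0;
}
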
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
- enum pipe pipe;
- u32 tmp;
-
- drm_WARN_ON_ONCE(&dev_priv->drm,
- power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- pipe = PIPE_A;
- phy = DPIO_PHY0;
- } else {
- pipe = PIPE_C;
- phy = DPIO_PHY1;
- }
-
- /* since ref/cri clock was enabled */
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
-
- /* Poll for phypwrgood signal */
- if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy), 1))
- drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
- phy);
-
- vlv_dpio_get(dev_priv);
-
- /* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
- tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
- DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
- tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
- } else {
- /*
- * Force the non-existing CL2 off. BXT does this
- * too, so maybe it saves some power even though
- * CL2 doesn't exist?
- */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
- tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
- }
-
- vlv_dpio_put(dev_priv);
-
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-
- drm_dbg_kms(&dev_priv->drm,
- "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- drm_WARN_ON_ONCE(&dev_priv->drm,
- power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
- } else {
- phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
- }
-
- dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-
- vlv_set_power_well(dev_priv, power_well, false);
-
- drm_dbg_kms(&dev_priv->drm,
- "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
-
- /* PHY is fully reset now, so we can enable the PHY state asserts */
- dev_priv->chv_phy_assert[phy] = true;
-
- assert_chv_phy_status(dev_priv);
-}
-
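Note how the disable path above arms dev_priv->chv_phy_assert[phy] only after the well has been through a full reset; until then, assert_chv_phy_status() and assert_chv_phy_powergate() silently skip their checks, since firmware may have left the PHY half powered. A tiny model of that arm-after-first-reset latch (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool phy_assert_armed;	/* hypothetical stand-in for chv_phy_assert[] */

static void check_phy_status(void)
{
	if (!phy_assert_armed)
		return;		/* BIOS may have left junk: skip the check */
	puts("verifying PHY status");
}

int main(void)
{
	check_phy_status();		/* no-op before the first full reset */
	phy_assert_armed = true;	/* set once the well has been disabled */
	check_phy_status();		/* from now on the strict check runs */
	return 0;
}
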
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override, unsigned int mask)
-{
- enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
- u32 reg, val, expected, actual;
-
- /*
- * The BIOS can leave the PHY in some weird state
- * where it doesn't fully power down some parts.
- * Disable the asserts until the PHY has been fully
- * reset (i.e. the power well has been disabled at
- * least once).
- */
- if (!dev_priv->chv_phy_assert[phy])
- return;
-
- if (ch == DPIO_CH0)
- reg = _CHV_CMN_DW0_CH0;
- else
- reg = _CHV_CMN_DW6_CH1;
-
- vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, pipe, reg);
- vlv_dpio_put(dev_priv);
-
- /*
- * This assumes !override is only used when the port is disabled.
- * All lanes should power down even without the override when
- * the port is disabled.
- */
- if (!override || mask == 0xf) {
- expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
- /*
- * If CH1 common lane is not active anymore
- * (e.g. for pipe B DPLL) the entire channel will
- * shut down, which causes the common lane registers
- * to read as 0. That means we can't actually check
- * the lane power down status bits, but as the entire
- * register reads as 0 it's a good indication that the
- * channel is indeed entirely powered down.
- */
- if (ch == DPIO_CH1 && val == 0)
- expected = 0;
- } else if (mask != 0x0) {
- expected = DPIO_ANYDL_POWERDOWN;
- } else {
- expected = 0;
- }
-
- if (ch == DPIO_CH0)
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
- else
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
- actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
-
- drm_WARN(&dev_priv->drm, actual != expected,
- "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
- !!(actual & DPIO_ALLDL_POWERDOWN),
- !!(actual & DPIO_ANYDL_POWERDOWN),
- !!(expected & DPIO_ALLDL_POWERDOWN),
- !!(expected & DPIO_ANYDL_POWERDOWN),
- reg, val);
-}
-
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- bool was_override;
-
- mutex_lock(&power_domains->lock);
-
- was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- if (override == was_override)
- goto out;
-
- if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-
- drm_dbg_kms(&dev_priv->drm,
- "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-
-out:
- mutex_unlock(&power_domains->lock);
-
- return was_override;
-}
-
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
- enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
-
- mutex_lock(&power_domains->lock);
-
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
-
- if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-
- drm_dbg_kms(&dev_priv->drm,
- "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-
- assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
-
- mutex_unlock(&power_domains->lock);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe = PIPE_A;
- bool enabled;
- u32 state, ctrl;
-
- vlv_punit_get(dev_priv);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
- /*
- * We only ever set the power-on and power-gate states; anything
- * else is unexpected.
- */
- drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
- state != DP_SSS_PWR_GATE(pipe));
- enabled = state == DP_SSS_PWR_ON(pipe);
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
-
- vlv_punit_put(dev_priv);
-
- return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool enable)
-{
- enum pipe pipe = PIPE_A;
- u32 state;
- u32 ctrl;
-
- state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
- vlv_punit_get(dev_priv);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- ctrl &= ~DP_SSC_MASK(pipe);
- ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
-
- if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
- "timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
-
-#undef COND
-
-out:
- vlv_punit_put(dev_priv);
-}
-
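chv_set_pipe_power_well() above follows the usual Punit handshake: bail out early if the status field already matches, otherwise rewrite the control field and poll until firmware mirrors the request into the status bits (which sit 16 bits above the control bits, hence the ctrl << 16 != state sanity check in the enabled-query). A self-contained C sketch of that handshake against a fake register (all names, field offsets and the fake-firmware behaviour are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake PUNIT_REG_DSPSSPM backing store. */
static uint32_t dspsspm;

static uint32_t punit_read(void) { return dspsspm; }

static void punit_write(uint32_t val)
{
	/* Fake firmware: ack the request by mirroring the control field
	 * into the status field 16 bits above it. */
	dspsspm = (val & 0xffff) | ((val & 0x3) << 16);
}

#define SS_CTRL_MASK	0x3u
#define SS_STATE_MASK	(0x3u << 16)

static bool set_pipe_power(bool enable)
{
	uint32_t state = (enable ? 0x2u : 0x1u) << 16;
	int retries = 100;

	if ((punit_read() & SS_STATE_MASK) == state)
		return true;	/* already there: the COND early-out */

	punit_write((punit_read() & ~SS_CTRL_MASK) | (state >> 16));

	while (retries--)	/* poll until the status catches up */
		if ((punit_read() & SS_STATE_MASK) == state)
			return true;
	return false;		/* caller logs a timeout, as the driver does */
}

int main(void)
{
	printf("enabled: %d\n", set_pipe_power(true));
	printf("gated:   %d\n", set_pipe_power(false));
	return 0;
}
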
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- chv_set_pipe_power_well(dev_priv, power_well, true);
-
- vlv_display_power_well_init(dev_priv);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_display_power_well_deinit(dev_priv);
-
- chv_set_pipe_power_well(dev_priv, power_well, false);
-}
-
-static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
+static void __async_put_domains_mask(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
{
- return power_domains->async_put_domains[0] |
- power_domains->async_put_domains[1];
+ bitmap_or(mask->bits,
+ power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
}
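This hunk is the heart of the series: the u64 power-domain masks become struct intel_power_domain_mask, a bitmap sized by POWER_DOMAIN_NUM, so the number of domains is no longer capped at 64 and all mask algebra goes through the kernel's bitmap_*() helpers. A standalone C sketch of why the change is needed, using hand-rolled stand-ins for bitmap_or() and test_bit() (the 70-domain count is hypothetical):

#include <stdio.h>

#define DOMAIN_NUM	70	/* hypothetical: more domains than fit in a u64 */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITMAP_LONGS	((DOMAIN_NUM + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct domain_mask { unsigned long bits[BITMAP_LONGS]; };

/* Minimal stand-ins for the kernel's bitmap_or() and test_bit(). */
static void mask_or(struct domain_mask *dst, const struct domain_mask *a,
		    const struct domain_mask *b)
{
	size_t i;

	for (i = 0; i < BITMAP_LONGS; i++)
		dst->bits[i] = a->bits[i] | b->bits[i];
}

static int mask_test(const struct domain_mask *m, unsigned int bit)
{
	return (m->bits[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct domain_mask a = {{0}}, b = {{0}}, out;

	a.bits[0] = 1UL << 3;	/* a low domain number */
	/* Domain 69 would not fit in a u64 mask at all. */
	b.bits[69 / BITS_PER_LONG] |= 1UL << (69 % BITS_PER_LONG);

	mask_or(&out, &a, &b);	/* union of the two pending sets */
	printf("bit 3: %d, bit 69: %d\n",
	       mask_test(&out, 3), mask_test(&out, 69));
	return 0;
}
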
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -2061,8 +340,11 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
- return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
- power_domains->async_put_domains[1]);
+
+ return !drm_WARN_ON(&i915->drm,
+ bitmap_intersects(power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM));
}
static bool
@@ -2071,14 +353,17 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
+ struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
- err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
- !!__async_put_domains_mask(power_domains));
+ __async_put_domains_mask(power_domains, &async_put_mask);
+ err |= drm_WARN_ON(&i915->drm,
+ !!power_domains->async_put_wakeref !=
+ !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
- for_each_power_domain(domain, __async_put_domains_mask(power_domains))
+ for_each_power_domain(domain, &async_put_mask)
err |= drm_WARN_ON(&i915->drm,
power_domains->domain_use_count[domain] != 1);
@@ -2086,14 +371,14 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
}
static void print_power_domains(struct i915_power_domains *power_domains,
- const char *prefix, u64 mask)
+ const char *prefix, struct intel_power_domain_mask *mask)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
enum intel_display_power_domain domain;
- drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
+ drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask)
drm_dbg(&i915->drm, "%s use_count %d\n",
intel_display_power_domain_str(domain),
@@ -2111,9 +396,9 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
power_domains->async_put_wakeref);
print_power_domains(power_domains, "async_put_domains[0]",
- power_domains->async_put_domains[0]);
+ &power_domains->async_put_domains[0]);
print_power_domains(power_domains, "async_put_domains[1]",
- power_domains->async_put_domains[1]);
+ &power_domains->async_put_domains[1]);
}
static void
@@ -2137,11 +422,13 @@ verify_async_put_domains_state(struct i915_power_domains *power_domains)
#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
-static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
+static void async_put_domains_mask(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
{
assert_async_put_domain_masks_disjoint(power_domains);
- return __async_put_domains_mask(power_domains);
+ __async_put_domains_mask(power_domains, mask);
}
static void
@@ -2150,8 +437,8 @@ async_put_domains_clear_domain(struct i915_power_domains *power_domains,
{
assert_async_put_domain_masks_disjoint(power_domains);
- power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
- power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
+ clear_bit(domain, power_domains->async_put_domains[0].bits);
+ clear_bit(domain, power_domains->async_put_domains[1].bits);
}
static bool
@@ -2159,16 +446,19 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct intel_power_domain_mask async_put_mask;
bool ret = false;
- if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
+ async_put_domains_mask(power_domains, &async_put_mask);
+ if (!test_bit(domain, async_put_mask.bits))
goto out_verify;
async_put_domains_clear_domain(power_domains, domain);
ret = true;
- if (async_put_domains_mask(power_domains))
+ async_put_domains_mask(power_domains, &async_put_mask);
+ if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
goto out_verify;
cancel_delayed_work(&power_domains->async_put_work);
@@ -2190,7 +480,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
if (intel_display_power_grab_async_put_ref(dev_priv, domain))
return;
- for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
+ for_each_power_domain_well(dev_priv, power_well, domain)
intel_power_well_get(dev_priv, power_well);
power_domains->domain_use_count[domain]++;
@@ -2271,20 +561,22 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
const char *name = intel_display_power_domain_str(domain);
+ struct intel_power_domain_mask async_put_mask;
power_domains = &dev_priv->power_domains;
drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
name);
+ async_put_domains_mask(power_domains, &async_put_mask);
drm_WARN(&dev_priv->drm,
- async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ test_bit(domain, async_put_mask.bits),
"Async disabling of domain %s is pending\n",
name);
power_domains->domain_use_count[domain]--;
- for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
+ for_each_power_domain_well_reverse(dev_priv, power_well, domain)
intel_power_well_put(dev_priv, power_well);
}
@@ -2313,7 +605,8 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
}
static void
-release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
+release_async_put_domains(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
{
struct drm_i915_private *dev_priv =
container_of(power_domains, struct drm_i915_private,
@@ -2361,12 +654,15 @@ intel_display_power_put_async_work(struct work_struct *work)
goto out_verify;
release_async_put_domains(power_domains,
- power_domains->async_put_domains[0]);
+ &power_domains->async_put_domains[0]);
/* Requeue the work if more domains were async put meanwhile. */
- if (power_domains->async_put_domains[1]) {
- power_domains->async_put_domains[0] =
- fetch_and_zero(&power_domains->async_put_domains[1]);
+ if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
+ bitmap_copy(power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
+ bitmap_zero(power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&new_work_wakeref));
} else {
@@ -2418,9 +714,9 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
- power_domains->async_put_domains[1] |= BIT_ULL(domain);
+ set_bit(domain, power_domains->async_put_domains[1].bits);
} else {
- power_domains->async_put_domains[0] |= BIT_ULL(domain);
+ set_bit(domain, power_domains->async_put_domains[0].bits);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&work_wakeref));
}
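The two async_put_domains[] slots implement a simple double buffer: slot 0 holds the references the already-queued worker will drop, while slot 1 collects puts that arrive while that worker is still pending; the worker then promotes slot 1 into slot 0 and requeues itself. A compact model of that scheme, using a plain u64 for brevity (names hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t slot[2];	/* stand-in for async_put_domains[0]/[1] */
static bool work_queued;

static void put_async(unsigned int domain)
{
	if (work_queued) {
		slot[1] |= 1ULL << domain;	/* a worker is already pending */
	} else {
		slot[0] |= 1ULL << domain;
		work_queued = true;	/* i.e. queue_async_put_domains_work() */
	}
}

static void worker(void)
{
	printf("releasing 0x%llx\n", (unsigned long long)slot[0]);
	slot[0] = slot[1];		/* promote the overflow slot... */
	slot[1] = 0;
	work_queued = slot[0] != 0;	/* ...and requeue if it was non-empty */
}

int main(void)
{
	put_async(1);
	put_async(2);	/* lands in slot 1: the first work is still queued */
	worker();	/* drops domain 1, requeues itself for domain 2 */
	worker();	/* drops domain 2 */
	return 0;
}
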
@@ -2451,6 +747,7 @@ out_verify:
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
+ struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
mutex_lock(&power_domains->lock);
@@ -2459,8 +756,8 @@ void intel_display_power_flush_work(struct drm_i915_private *i915)
if (!work_wakeref)
goto out_verify;
- release_async_put_domains(power_domains,
- async_put_domains_mask(power_domains));
+ async_put_domains_mask(power_domains, &async_put_mask);
+ release_async_put_domains(power_domains, &async_put_mask);
cancel_delayed_work(&power_domains->async_put_work);
out_verify:
@@ -2539,13 +836,13 @@ intel_display_power_get_in_set(struct drm_i915_private *i915,
{
intel_wakeref_t __maybe_unused wf;
- drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+ drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
power_domain_set->wakerefs[domain] = wf;
#endif
- power_domain_set->mask |= BIT_ULL(domain);
+ set_bit(domain, power_domain_set->mask.bits);
}
bool
@@ -2555,7 +852,7 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
{
intel_wakeref_t wf;
- drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+ drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get_if_enabled(i915, domain);
if (!wf)
@@ -2564,7 +861,7 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
power_domain_set->wakerefs[domain] = wf;
#endif
- power_domain_set->mask |= BIT_ULL(domain);
+ set_bit(domain, power_domain_set->mask.bits);
return true;
}
@@ -2572,11 +869,12 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
- u64 mask)
+ struct intel_power_domain_mask *mask)
{
enum intel_display_power_domain domain;
- drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
+ drm_WARN_ON(&i915->drm,
+ !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask) {
intel_wakeref_t __maybe_unused wf = -1;
@@ -2585,2485 +883,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
intel_display_power_put(i915, domain, wf);
- power_domain_set->mask &= ~BIT_ULL(domain);
+ clear_bit(domain, power_domain_set->mask.bits);
}
}
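The *_in_set helpers above pair each tracked domain with the wakeref cookie returned by intel_display_power_get(), so a caller can later release an arbitrary subset through intel_display_power_put_mask_in_set(). A simplified userspace model of that bookkeeping (all names and the fake wakeref values are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define NDOM 8

/* Rough model of intel_display_power_domain_set: a member mask plus
 * the wakeref cookie taken for each tracked domain. */
struct domain_set {
	uint32_t mask;
	int wakerefs[NDOM];
};

static int power_get(unsigned int d) { return 100 + d; }	/* fake wakeref */

static void power_put(unsigned int d, int wf)
{
	printf("put domain %u (wakeref %d)\n", d, wf);
}

static void get_in_set(struct domain_set *s, unsigned int d)
{
	s->wakerefs[d] = power_get(d);
	s->mask |= 1u << d;
}

static void put_mask_in_set(struct domain_set *s, uint32_t mask)
{
	unsigned int d;

	for (d = 0; d < NDOM; d++) {
		if (!(mask & (1u << d)))
			continue;
		power_put(d, s->wakerefs[d]);
		s->mask &= ~(1u << d);
	}
}

int main(void)
{
	struct domain_set s = {0};

	get_in_set(&s, 1);
	get_in_set(&s, 3);
	put_mask_in_set(&s, 1u << 1);		/* release just domain 1 */
	printf("still held: 0x%x\n", s.mask);	/* domain 3 remains */
	return 0;
}
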
-#define I830_PIPES_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * ICL PW_0/PG_0 domains (HW/DMC control):
- * - PCI
- * - clocks except port PLL
- * - central power except FBC
- * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
- * ICL PW_1/PG_1 domains (HW/DMC control):
- * - DBUF function
- * - PIPE_A and its planes, except VGA
- * - transcoder EDP + PSR
- * - transcoder DSI
- * - DDI_A
- * - FBC
- */
-#define ICL_PW_4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /* VDSC/joining */
-#define ICL_PW_3_POWER_DOMAINS ( \
- ICL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /*
- * - transcoder WD
- * - KVMR (HW control)
- */
-#define ICL_PW_2_POWER_DOMAINS ( \
- ICL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /*
- * - KVMR (HW control)
- */
-#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- ICL_PW_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_DC_OFF) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define ICL_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define ICL_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define ICL_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define ICL_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
-#define ICL_DDI_IO_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
-#define ICL_DDI_IO_F_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
-
-#define ICL_AUX_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_A))
-#define ICL_AUX_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
-#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
-#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
-#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
-
-#define TGL_PW_5_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_D) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
- BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_PW_4_POWER_DOMAINS ( \
- TGL_PW_5_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_PW_3_POWER_DOMAINS ( \
- TGL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_PW_2_POWER_DOMAINS ( \
- TGL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- TGL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
-#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
-#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
-#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
-#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
-#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
-
-#define TGL_AUX_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_A))
-#define TGL_AUX_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B))
-#define TGL_AUX_C_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C))
-
-#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
-#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
-#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
-#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
-#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5)
-#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6)
-
-#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
-#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
-#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
-#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
-#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5)
-#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6)
-
-#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
- BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
-
-#define RKL_PW_4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define RKL_PW_3_POWER_DOMAINS ( \
- RKL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * There is no PW_2/PG_2 on RKL.
- *
- * RKL PW_1/PG_1 domains (under HW/DMC control):
- * - DBUF function (note: registers are in PW0)
- * - PIPE_A and its planes and VDSC/joining, except VGA
- * - transcoder A
- * - DDI_A and DDI_B
- * - FBC
- *
- * RKL PW_0/PG_0 domains (under HW/DMC control):
- * - PCI
- * - clocks except port PLL
- * - shared functions:
- * * interrupts except pipe interrupts
- * * MBus except PIPE_MBUS_DBOX_CTL
- * * DBUF registers
- * - central power except FBC
- * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
- */
-
-#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- RKL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * From DG1 onwards, audio MMIO/VERBS lies in the PG0 power well.
- */
-#define DG1_PW_3_POWER_DOMAINS ( \
- TGL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define DG1_PW_2_POWER_DOMAINS ( \
- DG1_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- DG1_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * XE_LPD Power Domains
- *
- * Previous platforms required that PG(n-1) be enabled before PG(n). That
- * dependency chain turns into a dependency tree on XE_LPD:
- *
- * PG0
- * |
- * --PG1--
- * / \
- * PGA --PG2--
- * / | \
- * PGB PGC PGD
- *
- * Power wells must be enabled from top to bottom and disabled from bottom
- * to top. This allows pipes to be power gated independently.
- */
-
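The tree above implies an ordering constraint that the power well tables encode by listing parents before children: enabling walks the list forward and disabling walks it backward, matching the for_each_power_domain_well() / _reverse iterators seen earlier in this diff. A trivial sketch of that ordering (well names taken from the diagram):

#include <stdio.h>

/* Hypothetical well list ordered parent-first, as the real tables are. */
static const char *wells[] = { "PG1", "PGA", "PG2", "PGB", "PGC", "PGD" };
#define N_WELLS (sizeof(wells) / sizeof(wells[0]))

int main(void)
{
	size_t i;

	for (i = 0; i < N_WELLS; i++)	/* enable from the top of the tree down */
		printf("enable %s\n", wells[i]);

	for (i = N_WELLS; i-- > 0;)	/* disable from the leaves back up */
		printf("disable %s\n", wells[i]);
	return 0;
}
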
-#define XELPD_PW_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_D) | \
- BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_PW_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_PW_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_PW_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_PW_2_POWER_DOMAINS ( \
- XELPD_PW_B_POWER_DOMAINS | \
- XELPD_PW_C_POWER_DOMAINS | \
- XELPD_PW_D_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \
- BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * XELPD PW_1/PG_1 domains (under HW/DMC control):
- * - DBUF function (registers are in PW0)
- * - Transcoder A
- * - DDI_A and DDI_B
- *
- * XELPD PW_0/PG_0 domains (under HW/DMC control):
- * - PCI
- * - Clocks except port PLL
- * - Shared functions:
- * * interrupts except pipe interrupts
- * * MBus except PIPE_MBUS_DBOX_CTL
- * * DBUF registers
- * - Central power except FBC
- * - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
- */
-
-#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- XELPD_PW_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
-#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
-#define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
-#define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
-#define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
-#define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
-
-#define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
-#define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
-#define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
-#define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
-
-#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
-#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD)
-#define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
-#define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
-#define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
-#define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = i9xx_always_on_power_well_noop,
- .disable = i9xx_always_on_power_well_noop,
- .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = chv_pipe_power_well_sync_hw,
- .enable = chv_pipe_power_well_enable,
- .disable = chv_pipe_power_well_disable,
- .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = chv_dpio_cmn_power_well_enable,
- .disable = chv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
-};
-
-static const struct i915_power_well_ops i830_pipes_power_well_ops = {
- .sync_hw = i830_pipes_power_well_sync_hw,
- .enable = i830_pipes_power_well_enable,
- .disable = i830_pipes_power_well_disable,
- .is_enabled = i830_pipes_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i830_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "pipes",
- .domains = I830_PIPES_POWER_DOMAINS,
- .ops = &i830_pipes_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = hsw_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = gen9_dc_off_power_well_enable,
- .disable = gen9_dc_off_power_well_disable,
- .is_enabled = gen9_dc_off_power_well_enabled,
-};
-
-static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = bxt_dpio_cmn_power_well_enable,
- .disable = bxt_dpio_cmn_power_well_disable,
- .is_enabled = bxt_dpio_cmn_power_well_enabled,
-};
-
-static const struct i915_power_well_regs hsw_power_well_regs = {
- .bios = HSW_PWR_WELL_CTL1,
- .driver = HSW_PWR_WELL_CTL2,
- .kvmr = HSW_PWR_WELL_CTL3,
- .debug = HSW_PWR_WELL_CTL4,
-};
-
-static const struct i915_power_well_desc hsw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = HSW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = HSW_DISP_PW_GLOBAL,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
- .hsw.has_vga = true,
- },
- },
-};
-
-static const struct i915_power_well_desc bdw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = BDW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = HSW_DISP_PW_GLOBAL,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- },
- },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_display_power_well_enable,
- .disable = vlv_display_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_dpio_cmn_power_well_enable,
- .disable = vlv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_power_well_enable,
- .disable = vlv_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc vlv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .ops = &vlv_display_power_well_ops,
- .id = VLV_DISP_PW_DISP2D,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
- },
- },
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
- },
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
- },
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
- },
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
- },
- },
- {
- .name = "dpio-common",
- .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &vlv_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
- },
- },
-};
-
-static const struct i915_power_well_desc chv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- /*
- * Pipe A power well is the new disp2d well. Pipe B and C
- * power wells don't actually exist. Pipe A power well is
- * required for any pipe to work.
- */
- .domains = CHV_DISPLAY_POWER_DOMAINS,
- .ops = &chv_pipe_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "dpio-common-bc",
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &chv_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
- },
- },
- {
- .name = "dpio-common-d",
- .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .ops = &chv_dpio_cmn_power_well_ops,
- .id = CHV_DISP_PW_DPIO_CMN_D,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
- },
- },
-};
-
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id)
-{
- struct i915_power_well *power_well;
- bool ret;
-
- power_well = lookup_power_well(dev_priv, power_well_id);
- ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
-
- return ret;
-}
-
-static const struct i915_power_well_desc skl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "MISC IO power well",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_MISC_IO,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
- },
- },
- {
- .name = "DC off",
- .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A/E IO power well",
- .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO power well",
- .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
- },
- },
-};
-
-static const struct i915_power_well_desc bxt_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "dpio-common-a",
- .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DISP_PW_DPIO_CMN_A,
- {
- .bxt.phy = DPIO_PHY1,
- },
- },
- {
- .name = "dpio-common-bc",
- .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .bxt.phy = DPIO_PHY0,
- },
- },
-};
-
-static const struct i915_power_well_desc glk_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "dpio-common-a",
- .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DISP_PW_DPIO_CMN_A,
- {
- .bxt.phy = DPIO_PHY1,
- },
- },
- {
- .name = "dpio-common-b",
- .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .bxt.phy = DPIO_PHY0,
- },
- },
- {
- .name = "dpio-common-c",
- .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = GLK_DISP_PW_DPIO_CMN_C,
- {
- .bxt.phy = DPIO_PHY2,
- },
- },
- {
- .name = "AUX A",
- .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "DDI A IO power well",
- .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
-};
-
-static const struct i915_power_well_ops icl_aux_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = icl_aux_power_well_enable,
- .disable = icl_aux_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_regs icl_aux_power_well_regs = {
- .bios = ICL_PWR_WELL_CTL_AUX1,
- .driver = ICL_PWR_WELL_CTL_AUX2,
- .debug = ICL_PWR_WELL_CTL_AUX4,
-};
-
-static const struct i915_power_well_regs icl_ddi_power_well_regs = {
- .bios = ICL_PWR_WELL_CTL_DDI1,
- .driver = ICL_PWR_WELL_CTL_DDI2,
- .debug = ICL_PWR_WELL_CTL_DDI4,
-};
-
-static const struct i915_power_well_desc icl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = ICL_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = ICL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO",
- .domains = ICL_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
- },
- },
- {
- .name = "DDI E IO",
- .domains = ICL_DDI_IO_E_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
- },
- },
- {
- .name = "DDI F IO",
- .domains = ICL_DDI_IO_F_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
- },
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C TC1",
- .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX D TC2",
- .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX E TC3",
- .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX F TC4",
- .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX C TBT1",
- .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX D TBT2",
- .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX E TBT3",
- .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX F TBT4",
- .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "power well 4",
- .domains = ICL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- },
- },
-};
-
-static void
-tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
-{
- u8 tries = 0;
- int ret;
-
- while (1) {
- u32 low_val;
- u32 high_val = 0;
-
- if (block)
- low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
- else
- low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
-
- /*
-	 * Spec states that we should time out the request after 200us,
-	 * but the function below will time out after 500us
- */
- ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val);
- if (ret == 0) {
- if (block &&
- (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
- ret = -EIO;
- else
- break;
- }
-
- if (++tries == 3)
- break;
-
- msleep(1);
- }
-
- if (ret)
- drm_err(&i915->drm, "TC cold %sblock failed\n",
- block ? "" : "un");
- else
- drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
- block ? "" : "un");
-}
-
-static void
-tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
- struct i915_power_well *power_well)
-{
- tgl_tc_cold_request(i915, true);
-}
-
-static void
-tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
- struct i915_power_well *power_well)
-{
- tgl_tc_cold_request(i915, false);
-}
-
-static void
-tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
- struct i915_power_well *power_well)
-{
- if (power_well->count > 0)
- tgl_tc_cold_off_power_well_enable(i915, power_well);
- else
- tgl_tc_cold_off_power_well_disable(i915, power_well);
-}
-
-static bool
-tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- /*
-	 * Not the correct implementation, but there is no way to just read it
-	 * from PCODE, so return the count to avoid state mismatch errors
- */
- return power_well->count;
-}
-
-static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
- .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
- .enable = tgl_tc_cold_off_power_well_enable,
- .disable = tgl_tc_cold_off_power_well_disable,
- .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
-};
-
-static const struct i915_power_well_desc tgl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = TGL_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = TGL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- }
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- }
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- }
- },
- {
- .name = "DDI IO TC1",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
- },
- },
- {
- .name = "DDI IO TC2",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
- },
- },
- {
- .name = "DDI IO TC3",
- .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
- },
- },
- {
- .name = "DDI IO TC4",
- .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
- },
- },
- {
- .name = "DDI IO TC5",
- .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
- },
- },
- {
- .name = "DDI IO TC6",
- .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
- },
- },
- {
- .name = "TC cold off",
- .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
- .ops = &tgl_tc_cold_off_ops,
- .id = TGL_DISP_PW_TC_COLD_OFF,
- },
- {
- .name = "AUX A",
- .domains = TGL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = TGL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = TGL_AUX_C_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "AUX USBC1",
- .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC2",
- .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC3",
- .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC4",
- .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC5",
- .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC6",
- .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX TBT1",
- .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT2",
- .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT3",
- .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT4",
- .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT5",
- .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT6",
- .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "power well 4",
- .domains = TGL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- }
- },
- {
- .name = "power well 5",
- .domains = TGL_PW_5_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_PW_5,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_D),
- },
- },
-};
-
-static const struct i915_power_well_desc rkl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 3",
- .domains = RKL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 4",
- .domains = RKL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- }
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- }
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- }
- },
- {
- .name = "DDI IO TC1",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
- },
- },
- {
- .name = "DDI IO TC2",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
- },
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX USBC1",
- .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
- },
- },
- {
- .name = "AUX USBC2",
- .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
- },
- },
-};
-
-static const struct i915_power_well_desc dg1_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = DG1_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = DG1_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- }
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- }
- },
- {
- .name = "DDI IO TC1",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
- },
- },
- {
- .name = "DDI IO TC2",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
- },
- },
- {
- .name = "AUX A",
- .domains = TGL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = TGL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX USBC1",
- .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC2",
- .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "power well 4",
- .domains = TGL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- }
- },
- {
- .name = "power well 5",
- .domains = TGL_PW_5_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_PW_5,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_D),
- },
- },
-};
-
-static const struct i915_power_well_desc xelpd_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = XELPD_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well A",
- .domains = XELPD_PW_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = XELPD_PW_CTL_IDX_PW_A,
- .hsw.irq_pipe_mask = BIT(PIPE_A),
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well B",
- .domains = XELPD_PW_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = XELPD_PW_CTL_IDX_PW_B,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well C",
- .domains = XELPD_PW_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = XELPD_PW_CTL_IDX_PW_C,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well D",
- .domains = XELPD_PW_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = XELPD_PW_CTL_IDX_PW_D,
- .hsw.irq_pipe_mask = BIT(PIPE_D),
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- }
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- }
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- }
- },
- {
- .name = "DDI IO D_XELPD",
- .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
- }
- },
- {
- .name = "DDI IO E_XELPD",
- .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
- }
- },
- {
- .name = "DDI IO TC1",
- .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
- }
- },
- {
- .name = "DDI IO TC2",
- .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
- }
- },
- {
- .name = "DDI IO TC3",
- .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
- }
- },
- {
- .name = "DDI IO TC4",
- .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
- }
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX C",
- .domains = TGL_AUX_C_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX D_XELPD",
- .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX E_XELPD",
- .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
- },
- },
- {
- .name = "AUX USBC1",
- .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX USBC2",
- .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
- },
- },
- {
- .name = "AUX USBC3",
- .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
- },
- },
- {
- .name = "AUX USBC4",
- .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
- },
- },
- {
- .name = "AUX TBT1",
- .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT2",
- .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT3",
- .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT4",
- .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
- .hsw.is_tc_tbt = true,
- },
- },
-};
-
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -5142,57 +965,6 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
return mask;
}
-static int
-__set_power_wells(struct i915_power_domains *power_domains,
- const struct i915_power_well_desc *power_well_descs,
- int power_well_descs_sz, u64 skip_mask)
-{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- power_domains);
- u64 power_well_ids = 0;
- int power_well_count = 0;
- int i, plt_idx = 0;
-
- for (i = 0; i < power_well_descs_sz; i++)
- if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
- power_well_count++;
-
- power_domains->power_well_count = power_well_count;
- power_domains->power_wells =
- kcalloc(power_well_count,
- sizeof(*power_domains->power_wells),
- GFP_KERNEL);
- if (!power_domains->power_wells)
- return -ENOMEM;
-
- for (i = 0; i < power_well_descs_sz; i++) {
- enum i915_power_well_id id = power_well_descs[i].id;
-
- if (BIT_ULL(id) & skip_mask)
- continue;
-
- power_domains->power_wells[plt_idx++].desc =
- &power_well_descs[i];
-
- if (id == DISP_PW_ID_NONE)
- continue;
-
- drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
- drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
- power_well_ids |= BIT_ULL(id);
- }
-
- return 0;
-}
-
-#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
- __set_power_wells(power_domains, __power_well_descs, \
- ARRAY_SIZE(__power_well_descs), skip_mask)
-
-#define set_power_wells(power_domains, __power_well_descs) \
- set_power_wells_mask(power_domains, __power_well_descs, 0)
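For context, the removed helpers above were invoked once per platform; a minimal sketch of the two call shapes, based on the call sites in intel_power_domains_init() below (error handling elided):

	/* Plain list: register every well in the table. */
	err = set_power_wells(power_domains, rkl_power_wells);

	/* Masked list: ADL-S reuses the TGL table but skips the TC-cold-off well. */
	err = set_power_wells_mask(power_domains, tgl_power_wells,
				   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));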
-
/**
* intel_power_domains_init - initializes the power domain structures
* @dev_priv: i915 device instance
@@ -5203,7 +975,6 @@ __set_power_wells(struct i915_power_domains *power_domains,
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int err;
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
@@ -5214,54 +985,12 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->dmc.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
-
mutex_init(&power_domains->lock);
INIT_DELAYED_WORK(&power_domains->async_put_work,
intel_display_power_put_async_work);
- /*
- * The enabling order will be from lower to higher indexed wells;
- * the disabling order is reversed.
- */
- if (!HAS_DISPLAY(dev_priv)) {
- power_domains->power_well_count = 0;
- err = 0;
- } else if (DISPLAY_VER(dev_priv) >= 13) {
- err = set_power_wells(power_domains, xelpd_power_wells);
- } else if (IS_DG1(dev_priv)) {
- err = set_power_wells(power_domains, dg1_power_wells);
- } else if (IS_ALDERLAKE_S(dev_priv)) {
- err = set_power_wells_mask(power_domains, tgl_power_wells,
- BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
- } else if (IS_ROCKETLAKE(dev_priv)) {
- err = set_power_wells(power_domains, rkl_power_wells);
- } else if (DISPLAY_VER(dev_priv) == 12) {
- err = set_power_wells(power_domains, tgl_power_wells);
- } else if (DISPLAY_VER(dev_priv) == 11) {
- err = set_power_wells(power_domains, icl_power_wells);
- } else if (IS_GEMINILAKE(dev_priv)) {
- err = set_power_wells(power_domains, glk_power_wells);
- } else if (IS_BROXTON(dev_priv)) {
- err = set_power_wells(power_domains, bxt_power_wells);
- } else if (DISPLAY_VER(dev_priv) == 9) {
- err = set_power_wells(power_domains, skl_power_wells);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- err = set_power_wells(power_domains, chv_power_wells);
- } else if (IS_BROADWELL(dev_priv)) {
- err = set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_HASWELL(dev_priv)) {
- err = set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- err = set_power_wells(power_domains, vlv_power_wells);
- } else if (IS_I830(dev_priv)) {
- err = set_power_wells(power_domains, i830_power_wells);
- } else {
- err = set_power_wells(power_domains, i9xx_always_on_power_well);
- }
-
- return err;
+ return intel_display_power_map_init(power_domains);
}
/**
@@ -5272,7 +1001,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
- kfree(dev_priv->power_domains.power_wells);
+ intel_display_power_map_cleanup(&dev_priv->power_domains);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -5281,11 +1010,8 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
- for_each_power_well(dev_priv, power_well) {
- power_well->desc->ops->sync_hw(dev_priv, power_well);
- power_well->hw_enabled =
- power_well->desc->ops->is_enabled(dev_priv, power_well);
- }
+ for_each_power_well(dev_priv, power_well)
+ intel_power_well_sync_hw(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
}
@@ -5303,7 +1029,7 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
drm_WARN(&dev_priv->drm, enable != state,
"DBuf slice %d power %s timeout!\n",
- slice, enabledisable(enable));
+ slice, str_enable_disable(enable));
}
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
@@ -5692,7 +1418,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
gen9_dbuf_enable(dev_priv);
- if (resume && intel_dmc_has_payload(dev_priv))
+ if (resume)
intel_dmc_load_program(dev_priv);
}
@@ -5759,7 +1485,7 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
gen9_dbuf_enable(dev_priv);
- if (resume && intel_dmc_has_payload(dev_priv))
+ if (resume)
intel_dmc_load_program(dev_priv);
}
@@ -5923,7 +1649,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (IS_DG2(dev_priv))
intel_snps_phy_wait_for_calibration(dev_priv);
- if (resume && intel_dmc_has_payload(dev_priv))
+ if (resume)
intel_dmc_load_program(dev_priv);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
@@ -5998,7 +1724,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* override and set the lane powerdown bits according to the
* current lane status.
*/
- if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+ if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
unsigned int mask;
@@ -6029,7 +1755,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
dev_priv->chv_phy_assert[DPIO_PHY0] = true;
}
- if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+ if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
unsigned int mask;
@@ -6065,15 +1791,15 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
/* If the display might already be active, skip this */
- if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
- disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
+ if (intel_power_well_is_enabled(dev_priv, cmn) &&
+ intel_power_well_is_enabled(dev_priv, disp2d) &&
intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
return;
drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
- disp2d->desc->ops->enable(dev_priv, disp2d);
+ intel_power_well_enable(dev_priv, disp2d);
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
@@ -6082,7 +1808,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
* Simply ungating isn't enough to reset the PHY sufficiently to get
* ports and lanes running.
*/
- cmn->desc->ops->disable(dev_priv, cmn);
+ intel_power_well_disable(dev_priv, cmn);
}
static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
@@ -6233,12 +1959,12 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
for_each_power_well_reverse(i915, power_well) {
if (power_well->desc->always_on || power_well->count ||
- !power_well->desc->ops->is_enabled(i915, power_well))
+ !intel_power_well_is_enabled(i915, power_well))
continue;
drm_dbg_kms(&i915->drm,
"BIOS left unused %s power well enabled, disabling it\n",
- power_well->desc->name);
+ intel_power_well_name(power_well));
intel_power_well_disable(i915, power_well);
}
@@ -6377,9 +2103,9 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
enum intel_display_power_domain domain;
drm_dbg(&i915->drm, "%-25s %d\n",
- power_well->desc->name, power_well->count);
+ intel_power_well_name(power_well), intel_power_well_refcount(power_well));
- for_each_power_domain(domain, power_well->desc->domains)
+ for_each_power_domain(domain, intel_power_well_domains(power_well))
drm_dbg(&i915->drm, " %-23s %d\n",
intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
@@ -6412,23 +2138,25 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
int domains_count;
bool enabled;
- enabled = power_well->desc->ops->is_enabled(i915, power_well);
- if ((power_well->count || power_well->desc->always_on) !=
+ enabled = intel_power_well_is_enabled(i915, power_well);
+ if ((intel_power_well_refcount(power_well) ||
+ intel_power_well_is_always_on(power_well)) !=
enabled)
drm_err(&i915->drm,
"power well %s state mismatch (refcount %d/enabled %d)",
- power_well->desc->name,
- power_well->count, enabled);
+ intel_power_well_name(power_well),
+ intel_power_well_refcount(power_well), enabled);
domains_count = 0;
- for_each_power_domain(domain, power_well->desc->domains)
+ for_each_power_domain(domain, intel_power_well_domains(power_well))
domains_count += power_domains->domain_use_count[domain];
- if (power_well->count != domains_count) {
+ if (intel_power_well_refcount(power_well) != domains_count) {
drm_err(&i915->drm,
"power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n",
- power_well->desc->name, power_well->count,
+ intel_power_well_name(power_well),
+ intel_power_well_refcount(power_well),
domains_count);
dump_domain_info = true;
}
@@ -6533,10 +2261,10 @@ void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m
enum intel_display_power_domain power_domain;
power_well = &power_domains->power_wells[i];
- seq_printf(m, "%-25s %d\n", power_well->desc->name,
- power_well->count);
+ seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
+ intel_power_well_refcount(power_well));
- for_each_power_domain(power_domain, power_well->desc->domains)
+ for_each_power_domain(power_domain, intel_power_well_domains(power_well))
seq_printf(m, " %-23s %d\n",
intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
@@ -6544,3 +2272,209 @@ void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m
mutex_unlock(&power_domains->lock);
}
+
+struct intel_ddi_port_domains {
+ enum port port_start;
+ enum port port_end;
+ enum aux_ch aux_ch_start;
+ enum aux_ch aux_ch_end;
+
+ enum intel_display_power_domain ddi_lanes;
+ enum intel_display_power_domain ddi_io;
+ enum intel_display_power_domain aux_legacy_usbc;
+ enum intel_display_power_domain aux_tbt;
+};
+
+static const struct intel_ddi_port_domains
+i9xx_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_F,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_F,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d11_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_B,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_B,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_C,
+ .port_end = PORT_F,
+ .aux_ch_start = AUX_CH_C,
+ .aux_ch_end = AUX_CH_F,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_C,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d12_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_C,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_C,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_TC1,
+ .port_end = PORT_TC6,
+ .aux_ch_start = AUX_CH_USBC1,
+ .aux_ch_end = AUX_CH_USBC6,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d13_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_C,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_C,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_TC1,
+ .port_end = PORT_TC4,
+ .aux_ch_start = AUX_CH_USBC1,
+ .aux_ch_end = AUX_CH_USBC4,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ }, {
+ .port_start = PORT_D_XELPD,
+ .port_end = PORT_E_XELPD,
+ .aux_ch_start = AUX_CH_D_XELPD,
+ .aux_ch_end = AUX_CH_E_XELPD,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_D,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ },
+};
+
+static void
+intel_port_domains_for_platform(struct drm_i915_private *i915,
+ const struct intel_ddi_port_domains **domains,
+ int *domains_size)
+{
+ if (DISPLAY_VER(i915) >= 13) {
+ *domains = d13_port_domains;
+ *domains_size = ARRAY_SIZE(d13_port_domains);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ *domains = d12_port_domains;
+ *domains_size = ARRAY_SIZE(d12_port_domains);
+ } else if (DISPLAY_VER(i915) >= 11) {
+ *domains = d11_port_domains;
+ *domains_size = ARRAY_SIZE(d11_port_domains);
+ } else {
+ *domains = i9xx_port_domains;
+ *domains_size = ARRAY_SIZE(i9xx_port_domains);
+ }
+}
+
+static const struct intel_ddi_port_domains *
+intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains;
+ int domains_size;
+ int i;
+
+ intel_port_domains_for_platform(i915, &domains, &domains_size);
+ for (i = 0; i < domains_size; i++)
+ if (port >= domains[i].port_start && port <= domains[i].port_end)
+ return &domains[i];
+
+ return NULL;
+}
+
+enum intel_display_power_domain
+intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+
+ if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID)
+ return POWER_DOMAIN_PORT_DDI_IO_A;
+
+ return domains->ddi_io + (int)(port - domains->port_start);
+}
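+
The lookup above resolves a domain by adding the port's offset within its range to the range's base domain; a small worked example, assuming the display version 12 table above:

	/*
	 * PORT_TC3 falls in the d12 TC range (base PORT_TC1), so:
	 *   intel_display_power_ddi_io_domain(i915, PORT_TC3)
	 *     == POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC3 - PORT_TC1)
	 *     == POWER_DOMAIN_PORT_DDI_IO_TC3
	 */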
+
+enum intel_display_power_domain
+intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+
+ if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID)
+ return POWER_DOMAIN_PORT_DDI_LANES_A;
+
+ return domains->ddi_lanes + (int)(port - domains->port_start);
+}
+
+static const struct intel_ddi_port_domains *
+intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains;
+ int domains_size;
+ int i;
+
+ intel_port_domains_for_platform(i915, &domains, &domains_size);
+ for (i = 0; i < domains_size; i++)
+ if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
+ return &domains[i];
+
+ return NULL;
+}
+
+enum intel_display_power_domain
+intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+
+ if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)
+ return POWER_DOMAIN_AUX_A;
+
+ return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
+}
+
+enum intel_display_power_domain
+intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+
+ if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID)
+ return POWER_DOMAIN_AUX_TBT1;
+
+ return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index f6d0e6e73c6d..7136ea3f233e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -8,71 +8,68 @@
#include "intel_runtime_pm.h"
+enum aux_ch;
enum dpio_channel;
enum dpio_phy;
+enum port;
struct drm_i915_private;
struct i915_power_well;
struct intel_encoder;
+/*
+ * Keep the pipe, transcoder, port (DDI_LANES, DDI_IO, AUX) domain instances
+ * consecutive, so that the pipe, transcoder, port -> power domain macros
+ * work correctly.
+ */
enum intel_display_power_domain {
POWER_DOMAIN_DISPLAY_CORE,
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
POWER_DOMAIN_PIPE_D,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_PIPE_D_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D,
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_D,
POWER_DOMAIN_TRANSCODER_EDP,
- /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
- POWER_DOMAIN_TRANSCODER_VDSC_PW2,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_G_LANES,
- POWER_DOMAIN_PORT_DDI_H_LANES,
- POWER_DOMAIN_PORT_DDI_I_LANES,
-
- POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */
+
+ /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
+
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_DDI_LANES_E,
+ POWER_DOMAIN_PORT_DDI_LANES_F,
+
+ POWER_DOMAIN_PORT_DDI_LANES_TC1,
POWER_DOMAIN_PORT_DDI_LANES_TC2,
POWER_DOMAIN_PORT_DDI_LANES_TC3,
POWER_DOMAIN_PORT_DDI_LANES_TC4,
POWER_DOMAIN_PORT_DDI_LANES_TC5,
POWER_DOMAIN_PORT_DDI_LANES_TC6,
- POWER_DOMAIN_PORT_DDI_LANES_D_XELPD = POWER_DOMAIN_PORT_DDI_LANES_TC5, /* XELPD */
- POWER_DOMAIN_PORT_DDI_LANES_E_XELPD,
-
- POWER_DOMAIN_PORT_DDI_A_IO,
- POWER_DOMAIN_PORT_DDI_B_IO,
- POWER_DOMAIN_PORT_DDI_C_IO,
- POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DDI_F_IO,
- POWER_DOMAIN_PORT_DDI_G_IO,
- POWER_DOMAIN_PORT_DDI_H_IO,
- POWER_DOMAIN_PORT_DDI_I_IO,
+ POWER_DOMAIN_PORT_DDI_IO_A,
+ POWER_DOMAIN_PORT_DDI_IO_B,
+ POWER_DOMAIN_PORT_DDI_IO_C,
+ POWER_DOMAIN_PORT_DDI_IO_D,
+ POWER_DOMAIN_PORT_DDI_IO_E,
+ POWER_DOMAIN_PORT_DDI_IO_F,
- POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */
+ POWER_DOMAIN_PORT_DDI_IO_TC1,
POWER_DOMAIN_PORT_DDI_IO_TC2,
POWER_DOMAIN_PORT_DDI_IO_TC3,
POWER_DOMAIN_PORT_DDI_IO_TC4,
POWER_DOMAIN_PORT_DDI_IO_TC5,
POWER_DOMAIN_PORT_DDI_IO_TC6,
- POWER_DOMAIN_PORT_DDI_IO_D_XELPD = POWER_DOMAIN_PORT_DDI_IO_TC5, /* XELPD */
- POWER_DOMAIN_PORT_DDI_IO_E_XELPD,
-
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -85,30 +82,17 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_G,
- POWER_DOMAIN_AUX_H,
- POWER_DOMAIN_AUX_I,
- POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */
+ POWER_DOMAIN_AUX_USBC1,
POWER_DOMAIN_AUX_USBC2,
POWER_DOMAIN_AUX_USBC3,
POWER_DOMAIN_AUX_USBC4,
POWER_DOMAIN_AUX_USBC5,
POWER_DOMAIN_AUX_USBC6,
- POWER_DOMAIN_AUX_D_XELPD = POWER_DOMAIN_AUX_USBC5, /* XELPD */
- POWER_DOMAIN_AUX_E_XELPD,
-
POWER_DOMAIN_AUX_IO_A,
- POWER_DOMAIN_AUX_C_TBT,
- POWER_DOMAIN_AUX_D_TBT,
- POWER_DOMAIN_AUX_E_TBT,
- POWER_DOMAIN_AUX_F_TBT,
- POWER_DOMAIN_AUX_G_TBT,
- POWER_DOMAIN_AUX_H_TBT,
- POWER_DOMAIN_AUX_I_TBT,
-
- POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */
+
+ POWER_DOMAIN_AUX_TBT1,
POWER_DOMAIN_AUX_TBT2,
POWER_DOMAIN_AUX_TBT3,
POWER_DOMAIN_AUX_TBT4,
@@ -123,39 +107,20 @@ enum intel_display_power_domain {
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
-};
-
-/*
- * i915_power_well_id:
- *
- * IDs used to look up power wells. Power wells accessed directly bypassing
- * the power domains framework must be assigned a unique ID. The rest of power
- * wells must be assigned DISP_PW_ID_NONE.
- */
-enum i915_power_well_id {
- DISP_PW_ID_NONE,
-
- VLV_DISP_PW_DISP2D,
- BXT_DISP_PW_DPIO_CMN_A,
- VLV_DISP_PW_DPIO_CMN_BC,
- GLK_DISP_PW_DPIO_CMN_C,
- CHV_DISP_PW_DPIO_CMN_D,
- HSW_DISP_PW_GLOBAL,
- SKL_DISP_PW_MISC_IO,
- SKL_DISP_PW_1,
- SKL_DISP_PW_2,
- ICL_DISP_PW_3,
- SKL_DISP_DC_OFF,
- TGL_DISP_PW_TC_COLD_OFF,
+ POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+ ((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A)
#define POWER_DOMAIN_TRANSCODER(tran) \
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
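These macros are why the header comment above insists the domain instances stay consecutive; a worked example (pipes are 0-based, so PIPE_C == 2):

	/*
	 * POWER_DOMAIN_PIPE(PIPE_C) == PIPE_C + POWER_DOMAIN_PIPE_A
	 *                           == POWER_DOMAIN_PIPE_C
	 * which only holds while the PIPE_A..PIPE_D domains are
	 * declared back to back.
	 */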
+struct intel_power_domain_mask {
+ DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
+};
+
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
@@ -173,41 +138,21 @@ struct i915_power_domains {
struct delayed_work async_put_work;
intel_wakeref_t async_put_wakeref;
- u64 async_put_domains[2];
+ struct intel_power_domain_mask async_put_domains[2];
struct i915_power_well *power_wells;
};
struct intel_display_power_domain_set {
- u64 mask;
+ struct intel_power_domain_mask mask;
#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
#endif
};
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if(BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
- (__power_well)++)
-
-#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
- (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well(__dev_priv, __power_well) \
- for_each_if((__power_well)->desc->domains & (__domain_mask))
-
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_reverse(__dev_priv, __power_well) \
- for_each_if((__power_well)->desc->domains & (__domain_mask))
+#define for_each_power_domain(__domain, __mask) \
+ for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \
+ for_each_if(test_bit((__domain), (__mask)->bits))
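A minimal usage sketch for the new bitmap-based mask (the mask variable is hypothetical, i915 is assumed to be the usual drm_i915_private pointer, and set_bit() is the standard kernel bitmap helper operating on the DECLARE_BITMAP() storage):

	struct intel_power_domain_mask mask = {};
	enum intel_display_power_domain domain;

	set_bit(POWER_DOMAIN_PIPE_A, mask.bits);
	set_bit(POWER_DOMAIN_AUX_A, mask.bits);

	for_each_power_domain(domain, &mask)
		drm_dbg(&i915->drm, "%s\n", intel_display_power_domain_str(domain));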
int intel_power_domains_init(struct drm_i915_private *dev_priv);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
@@ -232,8 +177,6 @@ intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
@@ -290,17 +233,26 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
- u64 mask);
+ struct intel_power_domain_mask *mask);
static inline void
intel_display_power_put_all_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set)
{
- intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
+ intel_display_power_put_mask_in_set(i915, power_domain_set, &power_domain_set->mask);
}
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m);
+enum intel_display_power_domain
+intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port);
+enum intel_display_power_domain
+intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port);
+enum intel_display_power_domain
+intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+enum intel_display_power_domain
+intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+
/*
* FIXME: We should probably switch this to a 0-based scheme to be consistent
* with how we now name/number DBUF_CTL instances.
@@ -324,9 +276,4 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override);
-
#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
new file mode 100644
index 000000000000..97b367f39f35
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -0,0 +1,1501 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#include "vlv_sideband_reg.h"
+
+#include "intel_display_power_map.h"
+#include "intel_display_power_well.h"
+
+#define __LIST_INLINE_ELEMS(__elem_type, ...) \
+ ((__elem_type[]) { __VA_ARGS__ })
+
+#define __LIST(__elems) { \
+ .list = __elems, \
+ .count = ARRAY_SIZE(__elems), \
+}
+
+#define I915_PW_DOMAINS(...) \
+ (const struct i915_power_domain_list) \
+ __LIST(__LIST_INLINE_ELEMS(const enum intel_display_power_domain, __VA_ARGS__))
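+
To make the compound-literal construction concrete, here is roughly what a two-domain invocation expands to (a sketch; the field names follow the __LIST definition above):

	/* I915_PW_DOMAINS(POWER_DOMAIN_PIPE_A, POWER_DOMAIN_INIT) becomes roughly: */
	(const struct i915_power_domain_list) {
		.list = (const enum intel_display_power_domain[]) {
			POWER_DOMAIN_PIPE_A,
			POWER_DOMAIN_INIT,
		},
		.count = 2,	/* ARRAY_SIZE() of the inline array */
	}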
+
+#define I915_DECL_PW_DOMAINS(__name, ...) \
+ static const struct i915_power_domain_list __name = I915_PW_DOMAINS(__VA_ARGS__)
+
+/* Zero-length list assigns all power domains, a NULL list assigns none. */
+#define I915_PW_DOMAINS_NONE NULL
+#define I915_PW_DOMAINS_ALL /* zero-length list */
+
+#define I915_PW_INSTANCES(...) \
+ (const struct i915_power_well_instance_list) \
+ __LIST(__LIST_INLINE_ELEMS(const struct i915_power_well_instance, __VA_ARGS__))
+
+#define I915_PW(_name, _domain_list, ...) \
+ { .name = _name, .domain_list = _domain_list, ## __VA_ARGS__ }
+
+
+struct i915_power_well_desc_list {
+ const struct i915_power_well_desc *list;
+ u8 count;
+};
+
+#define I915_PW_DESCRIPTORS(x) __LIST(x)
+
+
+I915_DECL_PW_DOMAINS(i9xx_pwdoms_always_on, I915_PW_DOMAINS_ALL);
+
+static const struct i915_power_well_desc i9xx_power_wells_always_on[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("always-on", &i9xx_pwdoms_always_on),
+ ),
+ .ops = &i9xx_always_on_power_well_ops,
+ .always_on = true,
+ },
+};
+
+static const struct i915_power_well_desc_list i9xx_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+};
+
+I915_DECL_PW_DOMAINS(i830_pwdoms_pipes,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc i830_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("pipes", &i830_pwdoms_pipes),
+ ),
+ .ops = &i830_pipes_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list i830_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(i830_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(hsw_pwdoms_display,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_CRT, /* DDI E */
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc hsw_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &hsw_pwdoms_display,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .id = HSW_DISP_PW_GLOBAL),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ },
+};
+
+static const struct i915_power_well_desc_list hsw_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(hsw_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(bdw_pwdoms_display,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_CRT, /* DDI E */
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc bdw_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &bdw_pwdoms_display,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .id = HSW_DISP_PW_GLOBAL),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ },
+};
+
+static const struct i915_power_well_desc_list bdw_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(bdw_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_display,
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_tx_bc_lanes,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc vlv_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &vlv_pwdoms_display,
+ .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
+ .id = VLV_DISP_PW_DISP2D),
+ ),
+ .ops = &vlv_display_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-tx-b-01", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01),
+ I915_PW("dpio-tx-b-23", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23),
+ I915_PW("dpio-tx-c-01", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01),
+ I915_PW("dpio-tx-c-23", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23),
+ ),
+ .ops = &vlv_dpio_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common", &vlv_pwdoms_dpio_cmn_bc,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ ),
+ .ops = &vlv_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list vlv_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(vlv_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_display,
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_d,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc chv_power_wells_main[] = {
+ {
+ /*
+ * Pipe A power well is the new disp2d well. Pipe B and C
+ * power wells don't actually exist. Pipe A power well is
+ * required for any pipe to work.
+ */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &chv_pwdoms_display),
+ ),
+ .ops = &chv_pipe_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-bc", &chv_pwdoms_dpio_cmn_bc,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ I915_PW("dpio-common-d", &chv_pwdoms_dpio_cmn_d,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
+ .id = CHV_DISP_PW_DPIO_CMN_D),
+ ),
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list chv_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(chv_power_wells_main),
+};
+
+#define SKL_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_pw_2,
+ SKL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_dc_off,
+ SKL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_a_e,
+ POWER_DOMAIN_PORT_DDI_IO_A,
+ POWER_DOMAIN_PORT_DDI_IO_E,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_b,
+ POWER_DOMAIN_PORT_DDI_IO_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_c,
+ POWER_DOMAIN_PORT_DDI_IO_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_d,
+ POWER_DOMAIN_PORT_DDI_IO_D,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc skl_power_wells_pw_1[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_1", I915_PW_DOMAINS_NONE,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .id = SKL_DISP_PW_1),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc skl_power_wells_main[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("MISC_IO", I915_PW_DOMAINS_NONE,
+ .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
+ .id = SKL_DISP_PW_MISC_IO),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &skl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &skl_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A_E", &skl_pwdoms_ddi_io_a_e, .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E),
+ I915_PW("DDI_IO_B", &skl_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &skl_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &skl_pwdoms_ddi_io_d, .hsw.idx = SKL_PW_CTL_IDX_DDI_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list skl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(skl_power_wells_main),
+};
+
+#define BXT_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_pw_2,
+ BXT_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dc_off,
+ BXT_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_a,
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc bxt_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &bxt_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &bxt_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-a", &bxt_pwdoms_dpio_cmn_a,
+ .bxt.phy = DPIO_PHY1,
+ .id = BXT_DISP_PW_DPIO_CMN_A),
+ I915_PW("dpio-common-bc", &bxt_pwdoms_dpio_cmn_bc,
+ .bxt.phy = DPIO_PHY0,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ ),
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list bxt_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(bxt_power_wells_main),
+};
+
+#define GLK_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_pw_2,
+ GLK_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dc_off,
+ GLK_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_a, POWER_DOMAIN_PORT_DDI_IO_A);
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B);
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_a,
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_b,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_c,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_a,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_b,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_c,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc glk_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &glk_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &glk_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-a", &glk_pwdoms_dpio_cmn_a,
+ .bxt.phy = DPIO_PHY1,
+ .id = BXT_DISP_PW_DPIO_CMN_A),
+ I915_PW("dpio-common-b", &glk_pwdoms_dpio_cmn_b,
+ .bxt.phy = DPIO_PHY0,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ I915_PW("dpio-common-c", &glk_pwdoms_dpio_cmn_c,
+ .bxt.phy = DPIO_PHY2,
+ .id = GLK_DISP_PW_DPIO_CMN_C),
+ ),
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &glk_pwdoms_aux_a, .hsw.idx = GLK_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &glk_pwdoms_aux_b, .hsw.idx = GLK_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &glk_pwdoms_aux_c, .hsw.idx = GLK_PW_CTL_IDX_AUX_C),
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = GLK_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list glk_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(glk_power_wells_main),
+};
+
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_4,
+ ICL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /* VDSC/joining */
+
+#define ICL_PW_3_POWER_DOMAINS \
+ ICL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_PORT_DDI_LANES_F, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D, \
+ POWER_DOMAIN_AUX_E, \
+ POWER_DOMAIN_AUX_F, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_3,
+ ICL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /*
+ * - transcoder WD
+ * - KVMR (HW control)
+ */
+
+#define ICL_PW_2_POWER_DOMAINS \
+ ICL_PW_3_POWER_DOMAINS, \
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_2,
+ ICL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /*
+ * - KVMR (HW control)
+ */
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_dc_off,
+ ICL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_DC_OFF,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D);
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_e, POWER_DOMAIN_PORT_DDI_IO_E);
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_f, POWER_DOMAIN_PORT_DDI_IO_F);
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_a,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_IO_A);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_b, POWER_DOMAIN_AUX_B);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_c, POWER_DOMAIN_AUX_C);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_d, POWER_DOMAIN_AUX_D);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_e, POWER_DOMAIN_AUX_E);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_f, POWER_DOMAIN_AUX_F);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4);
+
+static const struct i915_power_well_desc icl_power_wells_pw_1[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_1", I915_PW_DOMAINS_NONE,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .id = SKL_DISP_PW_1),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc icl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &icl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &icl_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &icl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = ICL_PW_CTL_IDX_DDI_D),
+ I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = ICL_PW_CTL_IDX_DDI_E),
+ I915_PW("DDI_IO_F", &icl_pwdoms_ddi_io_f, .hsw.idx = ICL_PW_CTL_IDX_DDI_F),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = ICL_PW_CTL_IDX_AUX_D),
+ I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = ICL_PW_CTL_IDX_AUX_E),
+ I915_PW("AUX_F", &icl_pwdoms_aux_f, .hsw.idx = ICL_PW_CTL_IDX_AUX_F),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &icl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc_list icl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(icl_power_wells_main),
+};
+
+#define TGL_PW_5_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_D, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \
+ POWER_DOMAIN_TRANSCODER_D
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_5,
+ TGL_PW_5_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define TGL_PW_4_POWER_DOMAINS \
+ TGL_PW_5_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_4,
+ TGL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define TGL_PW_3_POWER_DOMAINS \
+ TGL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC5, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC6, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2, \
+ POWER_DOMAIN_AUX_USBC3, \
+ POWER_DOMAIN_AUX_USBC4, \
+ POWER_DOMAIN_AUX_USBC5, \
+ POWER_DOMAIN_AUX_USBC6, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4, \
+ POWER_DOMAIN_AUX_TBT5, \
+ POWER_DOMAIN_AUX_TBT6
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_3,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_2,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_dc_off,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc1, POWER_DOMAIN_PORT_DDI_IO_TC1);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc5, POWER_DOMAIN_PORT_DDI_IO_TC5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc6, POWER_DOMAIN_PORT_DDI_IO_TC6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc5, POWER_DOMAIN_AUX_USBC5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc6, POWER_DOMAIN_AUX_USBC6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt5, POWER_DOMAIN_AUX_TBT5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt6, POWER_DOMAIN_AUX_TBT6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_tc_cold_off,
+ POWER_DOMAIN_AUX_USBC1,
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_USBC5,
+ POWER_DOMAIN_AUX_USBC6,
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_TBT5,
+ POWER_DOMAIN_AUX_TBT6,
+ POWER_DOMAIN_TC_COLD_OFF);
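+
+/*
+ * TC_COLD_OFF carries every USBC/TBT AUX domain plus POWER_DOMAIN_TC_COLD_OFF
+ * itself, so holding a reference on any TypeC AUX domain keeps the well
+ * enabled and thereby blocks TC cold entry.
+ */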
+
+static const struct i915_power_well_desc tgl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &tgl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &tgl_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &tgl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3),
+ I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4),
+ I915_PW("DDI_IO_TC5", &tgl_pwdoms_ddi_io_tc5, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5),
+ I915_PW("DDI_IO_TC6", &tgl_pwdoms_ddi_io_tc6, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &tgl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_5", &tgl_pwdoms_pw_5,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_D),
+ },
+};
+
+static const struct i915_power_well_desc tgl_power_wells_tc_cold_off[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("TC_cold_off", &tgl_pwdoms_tc_cold_off,
+ .id = TGL_DISP_PW_TC_COLD_OFF),
+ ),
+ .ops = &tgl_tc_cold_off_ops,
+ },
+};
+
+static const struct i915_power_well_desc tgl_power_wells_aux[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3),
+ I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4),
+ I915_PW("AUX_USBC5", &tgl_pwdoms_aux_usbc5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5),
+ I915_PW("AUX_USBC6", &tgl_pwdoms_aux_usbc6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4),
+ I915_PW("AUX_TBT5", &tgl_pwdoms_aux_tbt5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5),
+ I915_PW("AUX_TBT6", &tgl_pwdoms_aux_tbt6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ },
+};
+
+static const struct i915_power_well_desc_list tgl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(tgl_power_wells_main),
+ I915_PW_DESCRIPTORS(tgl_power_wells_tc_cold_off),
+ I915_PW_DESCRIPTORS(tgl_power_wells_aux),
+};
+
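+/* ADL-S reuses the TGL wells, minus the TC_cold_off well. */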
+static const struct i915_power_well_desc_list adls_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(tgl_power_wells_main),
+ I915_PW_DESCRIPTORS(tgl_power_wells_aux),
+};
+
+#define RKL_PW_4_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_4,
+ RKL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define RKL_PW_3_POWER_DOMAINS \
+ RKL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_3,
+ RKL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+/*
+ * There is no PW_2/PG_2 on RKL.
+ *
+ * RKL PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (note: registers are in PW0)
+ * - PIPE_A and its planes and VDSC/joining, except VGA
+ * - transcoder A
+ * - DDI_A and DDI_B
+ * - FBC
+ *
+ * RKL PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - central power except FBC
+ * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_dc_off,
+ RKL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc rkl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &rkl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &rkl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &rkl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ },
+};
+
+static const struct i915_power_well_desc rkl_power_wells_ddi_aux[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list rkl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(rkl_power_wells_main),
+ I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux),
+};
+
+/*
+ * From DG1 onwards, Audio MMIO/VERBS lies in the PG0 power well.
+ */
+#define DG1_PW_3_POWER_DOMAINS \
+ TGL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_3,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_dc_off,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_2,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc dg1_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &dg1_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &dg1_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &dg1_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &tgl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_5", &tgl_pwdoms_pw_5,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_D),
+ },
+};
+
+static const struct i915_power_well_desc_list dg1_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(dg1_power_wells_main),
+ I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux),
+};
+
+/*
+ * XE_LPD Power Domains
+ *
+ * Previous platforms required that PG(n-1) be enabled before PG(n). That
+ * dependency chain turns into a dependency tree on XE_LPD:
+ *
+ *       PG0
+ *        |
+ *     --PG1--
+ *    /       \
+ *  PGA     --PG2--
+ *         /   |   \
+ *       PGB  PGC  PGD
+ *
+ * Power wells must be enabled from top to bottom and disabled from bottom
+ * to top. This allows pipes to be power gated independently.
+ */
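+
+/*
+ * The tree is encoded below through the domain lists and array order:
+ * XELPD_PW_2_POWER_DOMAINS is a superset of the PW_B/C/D lists, so for
+ * example a get of POWER_DOMAIN_PIPE_C references both PW_2 and PW_C, and
+ * since wells are enabled in ascending array index order (and disabled in
+ * reverse), PW_2 comes up before PW_C and goes down after it.
+ */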
+
+#define XELPD_PW_D_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_D, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \
+ POWER_DOMAIN_TRANSCODER_D
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_d,
+ XELPD_PW_D_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_C_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_c,
+ XELPD_PW_C_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_B_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_b,
+ XELPD_PW_B_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_2_POWER_DOMAINS \
+ XELPD_PW_B_POWER_DOMAINS, \
+ XELPD_PW_C_POWER_DOMAINS, \
+ XELPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D, \
+ POWER_DOMAIN_AUX_E, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2, \
+ POWER_DOMAIN_AUX_USBC3, \
+ POWER_DOMAIN_AUX_USBC4, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2,
+ XELPD_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+/*
+ * XELPD PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (registers are in PW0)
+ * - Transcoder A
+ * - DDI_A and DDI_B
+ *
+ * XELPD PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - Clocks except port PLL
+ * - Shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - Central power except FBC
+ * - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off,
+ XELPD_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc xelpd_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xelpd_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xelpd_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xelpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xelpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_D", &xelpd_pwdoms_pw_d,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_D),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D),
+ I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3),
+ I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D),
+ I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3),
+ I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .fixed_enable_delay = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ },
+};
+
+static const struct i915_power_well_desc_list xelpd_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpd_power_wells_main),
+};
+
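+/*
+ * Resolve an instance's domain list into the well's domain bitmap. Note the
+ * asymmetry called out earlier: a NULL list (I915_PW_DOMAINS_NONE, e.g. the
+ * DMC handled PW_1 wells) leaves the bitmap empty, while a zero-length list
+ * (I915_PW_DOMAINS_ALL, the always-on wells) fills every bit.
+ */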
+static void init_power_well_domains(const struct i915_power_well_instance *inst,
+ struct i915_power_well *power_well)
+{
+ int j;
+
+ if (!inst->domain_list)
+ return;
+
+ if (inst->domain_list->count == 0) {
+ bitmap_fill(power_well->domains.bits, POWER_DOMAIN_NUM);
+
+ return;
+ }
+
+ for (j = 0; j < inst->domain_list->count; j++)
+ set_bit(inst->domain_list->list[j], power_well->domains.bits);
+}
+
+#define for_each_power_well_instance_in_desc_list(_desc_list, _desc_count, _desc, _inst) \
+ for ((_desc) = (_desc_list); (_desc) - (_desc_list) < (_desc_count); (_desc)++) \
+ for ((_inst) = (_desc)->instances->list; \
+ (_inst) - (_desc)->instances->list < (_desc)->instances->count; \
+ (_inst)++)
+
+#define for_each_power_well_instance(_desc_list, _desc_count, _descs, _desc, _inst) \
+ for ((_descs) = (_desc_list); \
+ (_descs) - (_desc_list) < (_desc_count); \
+ (_descs)++) \
+ for_each_power_well_instance_in_desc_list((_descs)->list, (_descs)->count, \
+ (_desc), (_inst))
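+
+/*
+ * Taken together these iterate every power well instance on the platform,
+ * i.e. conceptually:
+ *
+ *   for each descriptor list in the platform's array
+ *           for each descriptor in that list
+ *                   for each instance of that descriptor
+ */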
+
+static int
+__set_power_wells(struct i915_power_domains *power_domains,
+ const struct i915_power_well_desc_list *power_well_descs,
+ int power_well_descs_sz)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+ u64 power_well_ids = 0;
+ const struct i915_power_well_desc_list *desc_list;
+ const struct i915_power_well_desc *desc;
+ const struct i915_power_well_instance *inst;
+ int power_well_count = 0;
+ int plt_idx = 0;
+
+ for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst)
+ power_well_count++;
+
+ power_domains->power_well_count = power_well_count;
+ power_domains->power_wells =
+ kcalloc(power_well_count,
+ sizeof(*power_domains->power_wells),
+ GFP_KERNEL);
+ if (!power_domains->power_wells)
+ return -ENOMEM;
+
+ for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) {
+ struct i915_power_well *pw = &power_domains->power_wells[plt_idx];
+ enum i915_power_well_id id = inst->id;
+
+ pw->desc = desc;
+ drm_WARN_ON(&i915->drm,
+ overflows_type(inst - desc->instances->list, pw->instance_idx));
+ pw->instance_idx = inst - desc->instances->list;
+
+ init_power_well_domains(inst, pw);
+
+ plt_idx++;
+
+ if (id == DISP_PW_ID_NONE)
+ continue;
+
+ drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
+ drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
+ power_well_ids |= BIT_ULL(id);
+ }
+
+ return 0;
+}
+
+#define set_power_wells(power_domains, __power_well_descs) \
+ __set_power_wells(power_domains, __power_well_descs, \
+ ARRAY_SIZE(__power_well_descs))
+
+/**
+ * intel_display_power_map_init - initialize power domain -> power well mappings
+ * @power_domains: power domain state
+ *
+ * Creates all the power wells for the current platform, initializes the
+ * dynamic state for them and initializes the mapping of each power well to
+ * all the power domains the power well belongs to.
+ */
+int intel_display_power_map_init(struct i915_power_domains *power_domains)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+ /*
+ * The enabling order will be from lower to higher indexed wells,
+ * the disabling order is reversed.
+ */
+ if (!HAS_DISPLAY(i915)) {
+ power_domains->power_well_count = 0;
+ return 0;
+ }
+
+ if (DISPLAY_VER(i915) >= 13)
+ return set_power_wells(power_domains, xelpd_power_wells);
+ else if (IS_DG1(i915))
+ return set_power_wells(power_domains, dg1_power_wells);
+ else if (IS_ALDERLAKE_S(i915))
+ return set_power_wells(power_domains, adls_power_wells);
+ else if (IS_ROCKETLAKE(i915))
+ return set_power_wells(power_domains, rkl_power_wells);
+ else if (DISPLAY_VER(i915) == 12)
+ return set_power_wells(power_domains, tgl_power_wells);
+ else if (DISPLAY_VER(i915) == 11)
+ return set_power_wells(power_domains, icl_power_wells);
+ else if (IS_GEMINILAKE(i915))
+ return set_power_wells(power_domains, glk_power_wells);
+ else if (IS_BROXTON(i915))
+ return set_power_wells(power_domains, bxt_power_wells);
+ else if (DISPLAY_VER(i915) == 9)
+ return set_power_wells(power_domains, skl_power_wells);
+ else if (IS_CHERRYVIEW(i915))
+ return set_power_wells(power_domains, chv_power_wells);
+ else if (IS_BROADWELL(i915))
+ return set_power_wells(power_domains, bdw_power_wells);
+ else if (IS_HASWELL(i915))
+ return set_power_wells(power_domains, hsw_power_wells);
+ else if (IS_VALLEYVIEW(i915))
+ return set_power_wells(power_domains, vlv_power_wells);
+ else if (IS_I830(i915))
+ return set_power_wells(power_domains, i830_power_wells);
+ else
+ return set_power_wells(power_domains, i9xx_power_wells);
+}
+
+/**
+ * intel_display_power_map_cleanup - clean up power domain -> power well mappings
+ * @power_domains: power domain state
+ *
+ * Cleans up all the state that was initialized by intel_display_power_map_init().
+ */
+void intel_display_power_map_cleanup(struct i915_power_domains *power_domains)
+{
+ kfree(power_domains->power_wells);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.h b/drivers/gpu/drm/i915/display/intel_display_power_map.h
new file mode 100644
index 000000000000..da8f7055a44c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_POWER_MAP_H__
+#define __INTEL_DISPLAY_POWER_MAP_H__
+
+struct i915_power_domains;
+
+int intel_display_power_map_init(struct i915_power_domains *power_domains);
+void intel_display_power_map_cleanup(struct i915_power_domains *power_domains);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
new file mode 100644
index 000000000000..5be18eb94042
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -0,0 +1,1912 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_combo_phy.h"
+#include "intel_combo_phy_regs.h"
+#include "intel_crt.h"
+#include "intel_de.h"
+#include "intel_display_power_well.h"
+#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
+#include "intel_hotplug.h"
+#include "intel_pcode.h"
+#include "intel_pm.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+#include "intel_vga.h"
+#include "vlv_sideband.h"
+#include "vlv_sideband_reg.h"
+
+struct i915_power_well_regs {
+ i915_reg_t bios;
+ i915_reg_t driver;
+ i915_reg_t kvmr;
+ i915_reg_t debug;
+};
+
+struct i915_power_well_ops {
+ const struct i915_power_well_regs *regs;
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+};
+
+static const struct i915_power_well_instance *
+i915_power_well_instance(const struct i915_power_well *power_well)
+{
+ return &power_well->desc->instances->list[power_well->instance_idx];
+}
+
+struct i915_power_well *
+lookup_power_well(struct drm_i915_private *i915,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+
+ for_each_power_well(i915, power_well)
+ if (i915_power_well_instance(power_well)->id == power_well_id)
+ return power_well;
+
+ /*
+ * It's not feasible to add error checking code to the callers since
+ * this condition really shouldn't happen and it doesn't even make sense
+ * to abort things like display initialization sequences. Just return
+ * the first power well and hope the WARN gets reported so we can fix
+ * our driver.
+ */
+ drm_WARN(&i915->drm, 1,
+ "Power well %d not defined for this platform\n",
+ power_well_id);
+ return &i915->power_domains.power_wells[0];
+}
+
+void intel_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
+ power_well->desc->ops->enable(i915, power_well);
+ power_well->hw_enabled = true;
+}
+
+void intel_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
+ power_well->hw_enabled = false;
+ power_well->desc->ops->disable(i915, power_well);
+}
+
+void intel_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ power_well->desc->ops->sync_hw(i915, power_well);
+ power_well->hw_enabled =
+ power_well->desc->ops->is_enabled(i915, power_well);
+}
+
+void intel_power_well_get(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ if (!power_well->count++)
+ intel_power_well_enable(i915, power_well);
+}
+
+void intel_power_well_put(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ drm_WARN(&i915->drm, !power_well->count,
+ "Use count on power well %s is already zero",
+ i915_power_well_instance(power_well)->name);
+
+ if (!--power_well->count)
+ intel_power_well_disable(i915, power_well);
+}
+
+bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ return power_well->desc->ops->is_enabled(i915, power_well);
+}
+
+bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
+{
+ return power_well->hw_enabled;
+}
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, power_well_id);
+
+ return intel_power_well_is_enabled(dev_priv, power_well);
+}
+
+bool intel_power_well_is_always_on(struct i915_power_well *power_well)
+{
+ return power_well->desc->always_on;
+}
+
+const char *intel_power_well_name(struct i915_power_well *power_well)
+{
+ return i915_power_well_instance(power_well)->name;
+}
+
+struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
+{
+ return &power_well->domains;
+}
+
+int intel_power_well_refcount(struct i915_power_well *power_well)
+{
+ return power_well->count;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask, bool has_vga)
+{
+ if (has_vga)
+ intel_vga_reset_io_mem(dev_priv);
+
+ if (irq_pipe_mask)
+ gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask)
+{
+ if (irq_pipe_mask)
+ gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+}
+
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
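+
+/*
+ * Both mappings rely on the PW control indices and the aux_ch enum being
+ * contiguous, e.g. ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B) works out to
+ * ICL_PW_CTL_IDX_AUX_B - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A == AUX_CH_B.
+ */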
+
+static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
+{
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+
+ return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
+}
+
+static struct intel_digital_port *
+aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+ enum aux_ch aux_ch)
+{
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ /* We'll check the MST primary port */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(encoder);
+ if (!dig_port)
+ continue;
+
+ if (dig_port->aux_ch != aux_ch) {
+ dig_port = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ return dig_port;
+}
+
+static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+ const struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+
+ return intel_port_to_phy(i915, dig_port->base.port);
+}
+
+static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool timeout_expected)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+
+ /*
+ * For some power wells we're not supposed to watch the status bit for
+ * an ack, but rather just wait a fixed amount of time and then
+ * proceed. This is only used on DG2.
+ */
+ if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
+ usleep_range(600, 1200);
+ return;
+ }
+
+ /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
+ if (intel_de_wait_for_set(dev_priv, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
+ drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ intel_power_well_name(power_well));
+
+ drm_WARN_ON(&dev_priv->drm, !timeout_expected);
+ }
+}
+
+static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+ const struct i915_power_well_regs *regs,
+ int pw_idx)
+{
+ u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 ret;
+
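+ /* Collect each active requester as its own bit: BIOS=1, driver=2, KVMR=4, debug=8. */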
+ ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
+ if (regs->kvmr.reg)
+ ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
+
+ return ret;
+}
+
+static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ bool disabled;
+ u32 reqs;
+
+ /*
+ * Bspec doesn't require waiting for PWs to get disabled, but still do
+ * this for paranoia. The known cases where a PW will be forced on:
+ * - a KVMR request on any power well via the KVMR request register
+ * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
+ * DEBUG request registers
+ * Skip the wait in case any of the request bits are set and print a
+ * diagnostic message.
+ */
+ wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
+ HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
+ (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+ if (disabled)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ intel_power_well_name(power_well),
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+}
+
+static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+ enum skl_power_gate pg)
+{
+ /* Timeout 5us for PG#0, for other PGs 1us */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 val;
+
+ if (power_well->desc->has_fuses) {
+ enum skl_power_gate pg;
+
+ pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
+
+ /* Wa_16013190616:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
+
+ /*
+ * For PW1 we have to wait both for the PW0/PG0 fuse state
+ * before enabling the power well and PW1/PG1's own fuse
+ * state after the enabling. For all other power wells with
+ * fuses we only have to wait for that PW/PG's fuse state
+ * after the enabling.
+ */
+ if (pg == SKL_PG1)
+ gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+ }
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+
+ if (power_well->desc->has_fuses) {
+ enum skl_power_gate pg;
+
+ pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ gen9_wait_for_power_well_fuses(dev_priv, pg);
+ }
+
+ hsw_power_well_post_enable(dev_priv,
+ power_well->desc->irq_pipe_mask,
+ power_well->desc->has_vga);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 val;
+
+ hsw_power_well_pre_disable(dev_priv,
+ power_well->desc->irq_pipe_mask);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ if (DISPLAY_VER(dev_priv) < 12) {
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val | ICL_LANE_ENABLE_AUX);
+ }
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+
+ /* Display WA #1178: icl */
+ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
+ val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
+ val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+ intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
+ }
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val & ~ICL_LANE_ENABLE_AUX);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
+{
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
+ return;
+
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ return;
+
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
+}
+
+#else
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
+{
+}
+
+#endif
+
+#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+
+static void icl_tc_cold_exit(struct drm_i915_private *i915)
+{
+ int ret, tries = 0;
+
+ while (1) {
+ ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0,
+ 250, 1);
+ if (ret != -EAGAIN || ++tries == 3)
+ break;
+ msleep(1);
+ }
+
+ /* Spec states that TC cold exit can take up to 1ms to complete */
+ if (!ret)
+ msleep(1);
+
+ /* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
+ drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+ "succeeded");
+}
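
icl_tc_cold_exit() retries the pcode request a bounded number of times while the mailbox is busy. A standalone sketch (not part of this patch) of the same retry-on-EAGAIN pattern, with a fake mailbox standing in for snb_pcode_write_timeout():

#include <errno.h>
#include <stdio.h>

/* Fake mailbox: returns -EAGAIN while still busy, then succeeds. */
static int fake_pcode_write(int *busy)
{
	return (*busy)-- > 0 ? -EAGAIN : 0;
}

static int retry_pcode_write(void)
{
	int ret, tries = 0;
	int busy = 2;

	while (1) {
		ret = fake_pcode_write(&busy);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		/* the kernel code sleeps ~1ms between attempts */
	}
	return ret;
}

int main(void)
{
	printf("final ret = %d\n", retry_pcode_write());
	return 0;
}
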
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ bool is_tbt = power_well->desc->is_tc_tbt;
+ bool timeout_expected;
+ u32 val;
+
+ icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
+
+ val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
+ val &= ~DP_AUX_CH_CTL_TBT_IO;
+ if (is_tbt)
+ val |= DP_AUX_CH_CTL_TBT_IO;
+ intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
+
+ /*
+ * An AUX timeout is expected if the TBT DP tunnel is down, or when we
+ * need to enable AUX on a legacy TypeC port as part of the TC-cold
+ * exit sequence.
+ */
+ timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ icl_tc_cold_exit(dev_priv);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
+
+ if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
+ enum tc_port tc_port;
+
+ tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
+
+ if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
+ DKL_CMN_UC_DW27_UC_HEALTH, 1))
+ drm_warn(&dev_priv->drm,
+ "Timeout waiting TC uC health\n");
+ }
+}
+
+static void
+icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+
+ if (intel_phy_is_tc(dev_priv, phy))
+ return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_enable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+
+ if (intel_phy_is_tc(dev_priv, phy))
+ return hsw_power_well_disable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_disable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_disable(dev_priv, power_well);
+}
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
+ HSW_PWR_WELL_CTL_STATE(pw_idx);
+ u32 val;
+
+ val = intel_de_read(dev_priv, regs->driver);
+
+ /*
+ * On GEN9 big core, due to a DMC bug, the driver's request bits for PW1
+ * and the MISC_IO PW will not be restored, so check instead for the
+ * BIOS's own request bits, which are forced-on for these power wells
+ * when exiting DC5/6.
+ */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+ val |= intel_de_read(dev_priv, regs->bios);
+
+ return (val & mask) == mask;
+}
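
hsw_power_well_enabled() treats a well as enabled only when both the REQ and STATE bits agree, i.e. when the driver asked for the well and the HW acked it. A standalone sketch (not part of this patch) with a hypothetical two-bits-per-well layout mirroring the REQ/STATE pairing of HSW_PWR_WELL_CTL_*:

#include <stdio.h>

#define REQ(idx)	(1u << ((idx) * 2 + 1))
#define STATE(idx)	(1u << ((idx) * 2))

static int well_enabled(unsigned int ctl, int idx)
{
	unsigned int mask = REQ(idx) | STATE(idx);

	/* enabled only if both requested and acked */
	return (ctl & mask) == mask;
}

int main(void)
{
	unsigned int ctl = REQ(2) | STATE(2);

	printf("%d %d\n", well_enabled(ctl, 2), well_enabled(ctl, 1));
	return 0;
}
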
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+
+ /*
+ * TODO: check for the following to verify the conditions to enter DC9
+ * state are satisfied:
+ * 1] Check relevant display engine registers to verify if mode set
+ * disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
+
+ /*
+ * TODO: check for the following to verify DC9 state was indeed
+ * entered before programming to disable it:
+ * 1] Check relevant display engine registers to verify if mode
+ * set disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ intel_de_write(dev_priv, DC_STATE_EN, state);
+
+ /* It has been observed that disabling the DC6 state sometimes
+ * doesn't stick and the DMC keeps returning the old value. Make sure
+ * the write really sticks by re-reading it enough times, and force a
+ * rewrite until we are confident the state is exactly what we want.
+ */
+ do {
+ v = intel_de_read(dev_priv, DC_STATE_EN);
+
+ if (v != state) {
+ intel_de_write(dev_priv, DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ drm_err(&dev_priv->drm,
+ "Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+ /* Most of the time a single retry is enough; avoid log spam */
+ if (rewrites > 1)
+ drm_dbg_kms(&dev_priv->drm,
+ "Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
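
gen9_write_dc_state() above works around the DMC occasionally dropping writes by re-reading the register several times and rewriting on any mismatch. A self-contained sketch (not kernel code) of that write-then-verify loop against a deliberately flaky fake register:

#include <stdio.h>

/* A fake register that drops the first two writes. */
static unsigned int hw_reg;
static int drops = 2;

static void reg_write(unsigned int v)
{
	if (drops-- <= 0)
		hw_reg = v;
}

static unsigned int reg_read(void)
{
	return hw_reg;
}

static int write_verified(unsigned int state)
{
	int rewrites = 0, rereads = 0;
	unsigned int v;

	reg_write(state);
	do {
		v = reg_read();
		if (v != state) {
			reg_write(state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;	/* value held across several re-reads */
		}
	} while (rewrites < 100);

	return v == state ? 0 : -1;
}

int main(void)
{
	printf("write %s after retries\n",
	       write_verified(0xa5) == 0 ? "stuck" : "failed");
	return 0;
}
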
+
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+
+ mask = DC_STATE_EN_UPTO_DC5;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
+ | DC_STATE_EN_DC9;
+ else if (DISPLAY_VER(dev_priv) == 11)
+ mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ mask |= DC_STATE_EN_DC9;
+ else
+ mask |= DC_STATE_EN_UPTO_DC6;
+
+ return mask;
+}
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->dmc.dc_state, val);
+ dev_priv->dmc.dc_state = val;
+}
+
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turns these back on when
+ * exiting that state to a shallower power state (lower in number). The HW
+ * will decide when to actually enter a given state on an on-demand basis, for
+ * instance depending on the active state of display pipes. The state of
+ * display registers backed by affected power rails is saved/restored as
+ * needed.
+ *
+ * Based on the above, enabling a deeper DC power state is asynchronous wrt.
+ * the HW actually entering it. Disabling a deeper power state is synchronous:
+ * for instance setting %DC_STATE_DISABLE won't complete until all HW
+ * resources are turned back on and register state is restored. This is
+ * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
+ * restored.
+ */
+void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
+{
+ u32 val;
+ u32 mask;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ state & ~dev_priv->dmc.allowed_dc_mask))
+ state &= dev_priv->dmc.allowed_dc_mask;
+
+ val = intel_de_read(dev_priv, DC_STATE_EN);
+ mask = gen9_dc_mask(dev_priv);
+ drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
+ val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->dmc.dc_state)
+ drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->dmc.dc_state, val & mask);
+
+ val &= ~mask;
+ val |= state;
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->dmc.dc_state = val & mask;
+}
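
The heart of gen9_set_dc_state() is a read-modify-write confined to the platform's DC mask: bits outside the mask are preserved, and @state (assumed to already be a subset of the mask, as in the caller above) replaces the bits inside it. A small sketch with hypothetical bit values, not part of this patch:

#include <stdio.h>

static unsigned int set_dc_bits(unsigned int cur, unsigned int mask,
				unsigned int state)
{
	/* clear the masked field, then install the new state */
	return (cur & ~mask) | state;
}

int main(void)
{
	/* mask = 0x7; bits outside the mask (0xf0) stay untouched */
	printf("0x%x\n", set_dc_bits(0xf1, 0x7, 0x2));	/* prints 0xf2 */
	return 0;
}
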
+
+static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
+ val = intel_de_read(dev_priv, DC_STATE_EN);
+ val &= ~DC_STATE_DC3CO_STATUS;
+ intel_de_write(dev_priv, DC_STATE_EN, val);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ /*
+ * Delay of 200us for DC3CO exit time, per Bspec 49196
+ */
+ usleep_range(200, 210);
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ enum i915_power_well_id high_pg;
+
+ /* Power wells at this level and above must be disabled for DC5 entry */
+ if (DISPLAY_VER(dev_priv) == 12)
+ high_pg = ICL_DISP_PW_3;
+ else
+ high_pg = SKL_DISP_PW_2;
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_display_power_well_is_enabled(dev_priv, high_pg),
+ "Power wells above platform's DC5 limit still enabled.\n");
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_dmc_loaded(dev_priv);
+}
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc5(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
+
+ assert_dmc_loaded(dev_priv);
+}
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc6(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc9(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
+ /*
+ * Power sequencer reset is not needed on
+ * platforms with South Display Engine on PCH,
+ * because PPS registers are always on.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_pps_reset_all(dev_priv);
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_disable_dc9(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 bios_req = intel_de_read(dev_priv, regs->bios);
+
+ /* Take over the request bit if set by BIOS. */
+ if (bios_req & mask) {
+ u32 drv_req = intel_de_read(dev_priv, regs->driver);
+
+ if (!(drv_req & mask))
+ intel_de_write(dev_priv, regs->driver, drv_req | mask);
+ intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
+ }
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+
+ power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ power_well = lookup_power_well(dev_priv,
+ GLK_DISP_PW_DPIO_CMN_C);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ i915_power_well_instance(power_well)->bxt.phy);
+ }
+}
+
+static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
+}
+
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+ u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
+
+ drm_WARN(&dev_priv->drm,
+ hw_enabled_dbuf_slices != enabled_dbuf_slices,
+ "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+ hw_enabled_dbuf_slices,
+ enabled_dbuf_slices);
+}
+
+void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_config cdclk_config = {};
+
+ if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ tgl_disable_dc3co(dev_priv);
+ return;
+ }
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
+ /* Can't read out voltage_level so can't use intel_cdclk_changed() */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ &cdclk_config));
+
+ gen9_assert_dbuf_enabled(dev_priv);
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ bxt_verify_ddi_phy_power_wells(dev_priv);
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ /*
+ * DMC retains HW context only for port A, the other combo
+ * PHY's HW context for port B is lost after DC transitions,
+ * so we need to restore it manually.
+ */
+ intel_combo_phy_init(dev_priv);
+}
+
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ gen9_disable_dc_states(dev_priv);
+}
+
+static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!intel_dmc_has_payload(dev_priv))
+ return;
+
+ switch (dev_priv->dmc.target_dc_state) {
+ case DC_STATE_EN_DC3CO:
+ tgl_enable_dc3co(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC6:
+ skl_enable_dc6(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC5:
+ gen9_enable_dc5(dev_priv);
+ break;
+ }
+}
+
+static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_A);
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_B);
+}
+
+static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ i830_disable_pipe(dev_priv, PIPE_B);
+ i830_disable_pipe(dev_priv, PIPE_A);
+}
+
+static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+ intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+}
+
+static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (intel_power_well_refcount(power_well) > 0)
+ i830_pipes_power_well_enable(dev_priv, power_well);
+ else
+ i830_pipes_power_well_disable(dev_priv, power_well);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
+ PUNIT_PWRGT_PWR_GATE(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
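
vlv_set_power_well() drives the PUNIT power gate through a two-bits-per-well control/status field: write the target state into the control register, then poll the status register until the masked field matches. A standalone sketch (not part of this patch) with a hypothetical encoding modelled on PUNIT_PWRGT_* (on = 0, gated = 2 within each 2-bit field):

#include <stdio.h>

#define PWRGT_MASK(idx)		(3u << ((idx) * 2))
#define PWRGT_PWR_ON(idx)	(0u << ((idx) * 2))
#define PWRGT_PWR_GATE(idx)	(2u << ((idx) * 2))

static int is_on(unsigned int status, int idx)
{
	return (status & PWRGT_MASK(idx)) == PWRGT_PWR_ON(idx);
}

int main(void)
{
	unsigned int status = PWRGT_PWR_GATE(1);	/* well 1 gated */

	printf("well 0 on: %d\n", is_on(status, 0));	/* 1 */
	printf("well 1 on: %d\n", is_on(status, 1));	/* 0 */
	return 0;
}
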
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ drm_WARN_ON(&dev_priv->drm, ctrl != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ intel_de_write(dev_priv, MI_ARB_VLV,
+ MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ intel_de_write(dev_priv, CBR1_VLV, 0);
+
+ drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+ 1000));
+}
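
The RAWCLK_FREQ_VLV write above converts the rawclk frequency from kHz to MHz with round-to-nearest rather than truncation. A sketch of the unsigned case of DIV_ROUND_CLOSEST() (the kernel macro also handles signed operands); the value below is hypothetical:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, divisor)	(((x) + (divisor) / 2) / (divisor))

int main(void)
{
	unsigned int rawclk_khz = 199800;	/* hypothetical rawclk in kHz */

	/* rounds to 200 MHz; plain division would truncate to 199 */
	printf("%u MHz\n", DIV_ROUND_CLOSEST(rawclk_khz, 1000));
	return 0;
}
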
+
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ enum pipe pipe;
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection. Supposedly DSI also
+ * needs the ref clock up and running.
+ *
+ * CHV DPLL B/C have some issues if VGA mode is enabled.
+ */
+ for_each_pipe(dev_priv, pipe) {
+ u32 val = intel_de_read(dev_priv, DPLL(pipe));
+
+ val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ }
+
+ vlv_init_display_clock_gating(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+ intel_hpd_poll_disable(dev_priv);
+
+ /* Re-enable the ADPA, if we have one */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_ANALOG)
+ intel_crt_reset(&encoder->base);
+ }
+
+ intel_vga_redisable_power_on(dev_priv);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* make sure we're done processing display irqs */
+ intel_synchronize_irq(dev_priv);
+
+ intel_pps_reset_all(dev_priv);
+
+ /* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_enable(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
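
Note the semantics: BITS_SET() is true only when every bit in @bits is set in @val, not merely some of them. A two-line demonstration, not part of this patch:

#include <stdio.h>

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

int main(void)
{
	printf("%d\n", BITS_SET(0xf, 0x3));	/* 1: both bits present */
	printf("%d\n", BITS_SET(0xf, 0x13));	/* 0: bit 4 is missing */
	return 0;
}
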
+
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ u32 phy_control = dev_priv->chv_phy_control;
+ u32 phy_status = 0;
+ u32 phy_status_mask = 0xffffffff;
+
+ /*
+ * The BIOS can leave the PHY in some weird state
+ * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully
+ * reset (ie. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
+ PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
+
+ if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
+
+ if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+ /* CL1 is on whenever anything is on in either channel */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+ /*
+ * The DPLLB check accounts for the pipe B + port A usage
+ * with CL2 powered up but all the lanes in the second channel
+ * powered down.
+ */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+ }
+
+ if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+ }
+
+ phy_status &= phy_status_mask;
+
+ /*
+ * The PHY may be busy with some initial calibration and whatnot,
+ * so the power state can take a while to actually change.
+ */
+ if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
+ drm_err(&dev_priv->drm,
+ "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
+}
+
+#undef BITS_SET
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ enum dpio_phy phy;
+ enum pipe pipe;
+ u32 tmp;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ id != VLV_DISP_PW_DPIO_CMN_BC &&
+ id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ pipe = PIPE_A;
+ phy = DPIO_PHY0;
+ } else {
+ pipe = PIPE_C;
+ phy = DPIO_PHY1;
+ }
+
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
+ drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
+ phy);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable dynamic power down */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+ DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp |= DPIO_DYNPWRDOWNEN_CH1;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ } else {
+ /*
+ * Force the non-existent CL2 off. BXT does this
+ * too, so maybe it saves some power even though
+ * CL2 doesn't exist?
+ */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ }
+
+ vlv_dpio_put(dev_priv);
+
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ enum dpio_phy phy;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ id != VLV_DISP_PW_DPIO_CMN_BC &&
+ id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ /* PHY is fully reset now, so we can enable the PHY state asserts */
+ dev_priv->chv_phy_assert[phy] = true;
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override, unsigned int mask)
+{
+ enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+ u32 reg, val, expected, actual;
+
+ /*
+ * The BIOS can leave the PHY in some weird state
+ * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully
+ * reset (ie. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->chv_phy_assert[phy])
+ return;
+
+ if (ch == DPIO_CH0)
+ reg = _CHV_CMN_DW0_CH0;
+ else
+ reg = _CHV_CMN_DW6_CH1;
+
+ vlv_dpio_get(dev_priv);
+ val = vlv_dpio_read(dev_priv, pipe, reg);
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * This assumes !override is only used when the port is disabled.
+ * All lanes should power down even without the override when
+ * the port is disabled.
+ */
+ if (!override || mask == 0xf) {
+ expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ /*
+ * If CH1 common lane is not active anymore
+ * (eg. for pipe B DPLL) the entire channel will
+ * shut down, which causes the common lane registers
+ * to read as 0. That means we can't actually check
+ * the lane power down status bits, but as the entire
+ * register reads as 0 it's a good indication that the
+ * channel is indeed entirely powered down.
+ */
+ if (ch == DPIO_CH1 && val == 0)
+ expected = 0;
+ } else if (mask != 0x0) {
+ expected = DPIO_ANYDL_POWERDOWN;
+ } else {
+ expected = 0;
+ }
+
+ if (ch == DPIO_CH0)
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ else
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+ actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+ drm_WARN(&dev_priv->drm, actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN),
+ !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN),
+ !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
+}
+
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool was_override;
+
+ mutex_lock(&power_domains->lock);
+
+ was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ if (override == was_override)
+ goto out;
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+out:
+ mutex_unlock(&power_domains->lock);
+
+ return was_override;
+}
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
+
+ mutex_lock(&power_domains->lock);
+
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+ assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = PIPE_A;
+ bool enabled;
+ u32 state, ctrl;
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = PIPE_A;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ vlv_punit_get(dev_priv);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+
+ if (wait_for(COND, 100))
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
+static void
+tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+{
+ u8 tries = 0;
+ int ret;
+
+ while (1) {
+ u32 low_val;
+ u32 high_val = 0;
+
+ if (block)
+ low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
+ else
+ low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
+
+ /*
+ * The spec states that we should time out the request after 200us,
+ * but the function below will time out after 500us
+ */
+ ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ if (ret == 0) {
+ if (block &&
+ (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
+ ret = -EIO;
+ else
+ break;
+ }
+
+ if (++tries == 3)
+ break;
+
+ msleep(1);
+ }
+
+ if (ret)
+ drm_err(&i915->drm, "TC cold %sblock failed\n",
+ block ? "" : "un");
+ else
+ drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ block ? "" : "un");
+}
+
+static void
+tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, true);
+}
+
+static void
+tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, false);
+}
+
+static void
+tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ if (intel_power_well_refcount(power_well) > 0)
+ tgl_tc_cold_off_power_well_enable(i915, power_well);
+ else
+ tgl_tc_cold_off_power_well_disable(i915, power_well);
+}
+
+static bool
+tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /*
+ * Not the correct implementation, but there is no way to just read it
+ * back from PCODE, so return the refcount to avoid state mismatch errors
+ */
+ return intel_power_well_refcount(power_well);
+}
+
+const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = chv_pipe_power_well_sync_hw,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops i830_pipes_power_well_ops = {
+ .sync_hw = i830_pipes_power_well_sync_hw,
+ .enable = i830_pipes_power_well_enable,
+ .disable = i830_pipes_power_well_disable,
+ .is_enabled = i830_pipes_power_well_enabled,
+};
+
+static const struct i915_power_well_regs hsw_power_well_regs = {
+ .bios = HSW_PWR_WELL_CTL1,
+ .driver = HSW_PWR_WELL_CTL2,
+ .kvmr = HSW_PWR_WELL_CTL3,
+ .debug = HSW_PWR_WELL_CTL4,
+};
+
+const struct i915_power_well_ops hsw_power_well_ops = {
+ .regs = &hsw_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = gen9_dc_off_power_well_enable,
+ .disable = gen9_dc_off_power_well_disable,
+ .is_enabled = gen9_dc_off_power_well_enabled,
+};
+
+const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = bxt_dpio_cmn_power_well_enable,
+ .disable = bxt_dpio_cmn_power_well_disable,
+ .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_aux_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_AUX1,
+ .driver = ICL_PWR_WELL_CTL_AUX2,
+ .debug = ICL_PWR_WELL_CTL_AUX4,
+};
+
+const struct i915_power_well_ops icl_aux_power_well_ops = {
+ .regs = &icl_aux_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_aux_power_well_enable,
+ .disable = icl_aux_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_ddi_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_DDI1,
+ .driver = ICL_PWR_WELL_CTL_DDI2,
+ .debug = ICL_PWR_WELL_CTL_DDI4,
+};
+
+const struct i915_power_well_ops icl_ddi_power_well_ops = {
+ .regs = &icl_ddi_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+const struct i915_power_well_ops tgl_tc_cold_off_ops = {
+ .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
+ .enable = tgl_tc_cold_off_power_well_enable,
+ .disable = tgl_tc_cold_off_power_well_disable,
+ .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
+};
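
All of the tables above implement the same i915_power_well_ops interface, so the generic enable/disable/sync_hw paths can dispatch without knowing the platform. A standalone analogue of that ops-table (vtable) pattern, not part of this patch:

#include <stdio.h>

struct well;

struct well_ops {
	void (*enable)(struct well *w);
	void (*disable)(struct well *w);
};

struct well {
	const struct well_ops *ops;
	const char *name;
};

static void demo_enable(struct well *w)
{
	printf("enable %s\n", w->name);
}

static void demo_disable(struct well *w)
{
	printf("disable %s\n", w->name);
}

static const struct well_ops demo_ops = {
	.enable = demo_enable,
	.disable = demo_disable,
};

int main(void)
{
	struct well w = { .ops = &demo_ops, .name = "PW_1" };

	w.ops->enable(&w);	/* dispatch through the ops table */
	w.ops->disable(&w);
	return 0;
}
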
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
new file mode 100644
index 000000000000..d0624642dcb6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef __INTEL_DISPLAY_POWER_WELL_H__
+#define __INTEL_DISPLAY_POWER_WELL_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+#include "intel_display_power.h"
+
+struct drm_i915_private;
+struct i915_power_well;
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
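
Both macros are plain pointer walks over the fixed power_wells array, forward and backward. A self-contained analogue of the forward variant (illustrative only, with a trivial stand-in struct):

#include <stdio.h>

struct well {
	const char *name;
};

#define for_each_well(__arr, __count, __w) \
	for ((__w) = (__arr); (__w) - (__arr) < (__count); (__w)++)

int main(void)
{
	struct well wells[] = { { "PW_1" }, { "PW_2" }, { "PW_MISC_IO" } };
	struct well *w;

	for_each_well(wells, 3, w)
		printf("%s\n", w->name);
	return 0;
}
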
+
+/*
+ * i915_power_well_id:
+ *
+ * IDs used to look up power wells. Power wells accessed directly, bypassing
+ * the power domains framework, must be assigned a unique ID. The rest of the
+ * power wells must be assigned DISP_PW_ID_NONE.
+ */
+enum i915_power_well_id {
+ DISP_PW_ID_NONE = 0, /* must be kept zero */
+
+ VLV_DISP_PW_DISP2D,
+ BXT_DISP_PW_DPIO_CMN_A,
+ VLV_DISP_PW_DPIO_CMN_BC,
+ GLK_DISP_PW_DPIO_CMN_C,
+ CHV_DISP_PW_DPIO_CMN_D,
+ HSW_DISP_PW_GLOBAL,
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_1,
+ SKL_DISP_PW_2,
+ ICL_DISP_PW_3,
+ SKL_DISP_DC_OFF,
+ TGL_DISP_PW_TC_COLD_OFF,
+};
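
A sketch of how such IDs are typically resolved back to a well, assuming a linear scan like lookup_power_well() declared below (the real helper's not-found handling may differ; everything here is illustrative):

#include <stddef.h>

enum well_id { WELL_NONE = 0, WELL_PW1, WELL_PW2 };

struct well {
	enum well_id id;
};

static struct well *lookup(struct well *wells, int count, enum well_id id)
{
	int i;

	for (i = 0; i < count; i++)
		if (wells[i].id == id)
			return &wells[i];
	return NULL;	/* hypothetical fallback; the kernel may warn instead */
}

int main(void)
{
	struct well wells[] = { { WELL_PW1 }, { WELL_PW2 } };

	return lookup(wells, 2, WELL_PW2) ? 0 : 1;
}
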
+
+struct i915_power_well_instance {
+ const char *name;
+ const struct i915_power_domain_list {
+ const enum intel_display_power_domain *list;
+ u8 count;
+ } *domain_list;
+
+ /* unique identifier for this power well */
+ enum i915_power_well_id id;
+ /*
+ * Arbitrary data associated with this power well. Platform and power
+ * well specific.
+ */
+ union {
+ struct {
+ /*
+ * request/status flag index in the PUNIT power well
+ * control/status registers.
+ */
+ u8 idx;
+ } vlv;
+ struct {
+ enum dpio_phy phy;
+ } bxt;
+ struct {
+ /*
+ * request/status flag index in the power well
+ * control/status registers.
+ */
+ u8 idx;
+ } hsw;
+ };
+};
+
+struct i915_power_well_desc {
+ const struct i915_power_well_ops *ops;
+ const struct i915_power_well_instance_list {
+ const struct i915_power_well_instance *list;
+ u8 count;
+ } *instances;
+
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u16 irq_pipe_mask:4;
+ u16 always_on:1;
+ /*
+ * Instead of waiting for the status bit to ack enables,
+ * just wait a specific amount of time and then consider
+ * the well enabled.
+ */
+ u16 fixed_enable_delay:1;
+ /* The pw is backing the VGA functionality */
+ u16 has_vga:1;
+ u16 has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ u16 is_tc_tbt:1;
+};
+
+struct i915_power_well {
+ const struct i915_power_well_desc *desc;
+ struct intel_power_domain_mask domains;
+ /* power well enable/disable usage count */
+ int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
+ /* index into desc->instances->list */
+ u8 instance_idx;
+};
+
+struct i915_power_well *lookup_power_well(struct drm_i915_private *i915,
+ enum i915_power_well_id id);
+
+void intel_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+void intel_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+void intel_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+void intel_power_well_get(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+void intel_power_well_put(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well);
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id);
+bool intel_power_well_is_always_on(struct i915_power_well *power_well);
+const char *intel_power_well_name(struct i915_power_well *power_well);
+struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well);
+int intel_power_well_refcount(struct i915_power_well *power_well);
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override);
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
+void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state);
+void gen9_disable_dc_states(struct drm_i915_private *dev_priv);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+
+extern const struct i915_power_well_ops i9xx_always_on_power_well_ops;
+extern const struct i915_power_well_ops chv_pipe_power_well_ops;
+extern const struct i915_power_well_ops chv_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops i830_pipes_power_well_ops;
+extern const struct i915_power_well_ops hsw_power_well_ops;
+extern const struct i915_power_well_ops gen9_dc_off_power_well_ops;
+extern const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops vlv_display_power_well_ops;
+extern const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops vlv_dpio_power_well_ops;
+extern const struct i915_power_well_ops icl_aux_power_well_ops;
+extern const struct i915_power_well_ops icl_ddi_power_well_ops;
+extern const struct i915_power_well_ops tgl_tc_cold_off_ops;
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index f05f0f9b5103..2dd5a4b7f5d8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -9,6 +9,7 @@
#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
#define __INTEL_DISPLAY_TRACE_H__
+#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
@@ -161,7 +162,7 @@ TRACE_EVENT(intel_memory_cxsr,
),
TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
- onoff(__entry->old), onoff(__entry->new),
+ str_on_off(__entry->old), str_on_off(__entry->new),
__entry->frame[PIPE_A], __entry->scanline[PIPE_A],
__entry->frame[PIPE_B], __entry->scanline[PIPE_B],
__entry->frame[PIPE_C], __entry->scanline[PIPE_C])
@@ -210,9 +211,9 @@ TRACE_EVENT(g4x_wm,
TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
__entry->primary, __entry->sprite, __entry->cursor,
- yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
- yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc,
- yesno(__entry->fbc))
+ str_yes_no(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
+ str_yes_no(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc,
+ str_yes_no(__entry->fbc))
);
TRACE_EVENT(vlv_wm,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 776b3e6662f2..408152f9f46a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -31,11 +31,11 @@
#include <linux/pwm.h>
#include <linux/sched/clock.h>
-#include <drm/dp/drm_dp_dual_mode_helper.h>
-#include <drm/dp/drm_dp_mst_helper.h>
+#include <drm/display/drm_dp_dual_mode_helper.h>
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dsc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
@@ -280,8 +280,7 @@ struct intel_panel_bl_funcs {
};
struct intel_panel {
- struct drm_display_mode *fixed_mode;
- struct drm_display_mode *downclock_mode;
+ struct list_head fixed_modes;
/* backlight */
struct {
@@ -847,8 +846,13 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
+ /*
+ * pre-icl: for packed/planar CbCr
+ * icl+: for everything
+ */
+ struct skl_ddb_entry plane_ddb[I915_MAX_PLANES];
+ /* pre-icl: for planar Y */
struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
- struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
} skl;
struct {
@@ -954,7 +958,7 @@ struct intel_crtc_state {
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
- int pipe_src_w, pipe_src_h;
+ struct drm_rect pipe_src;
/*
* Pipe pixel rate, adjusted for
@@ -1125,11 +1129,14 @@ struct intel_crtc_state {
int min_cdclk[I915_MAX_PLANES];
+ /* for packed/planar CbCr */
u32 data_rate[I915_MAX_PLANES];
+ /* for planar Y */
+ u32 data_rate_y[I915_MAX_PLANES];
- /* FIXME unify with data_rate[] */
- u64 plane_data_rate[I915_MAX_PLANES];
- u64 uv_plane_data_rate[I915_MAX_PLANES];
+ /* FIXME unify with data_rate[]? */
+ u64 rel_data_rate[I915_MAX_PLANES];
+ u64 rel_data_rate_y[I915_MAX_PLANES];
/* Gamma mode programmed on the pipe */
u32 gamma_mode;
@@ -1154,6 +1161,9 @@ struct intel_crtc_state {
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
+ u8 framestart_delay; /* 1-4 */
+ u8 msa_timing_delay; /* 0-3 */
+
struct {
u32 enable;
u32 gcp;
@@ -1179,9 +1189,6 @@ struct intel_crtc_state {
/* enable pipe csc? */
bool csc_enable;
- /* enable pipe big joiner? */
- bool bigjoiner;
-
/* big joiner pipe bitmask */
u8 bigjoiner_pipes;
@@ -1252,6 +1259,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_MAX,
};
+enum drrs_refresh_rate {
+ DRRS_REFRESH_RATE_HIGH,
+ DRRS_REFRESH_RATE_LOW,
+};
+
#define INTEL_PIPE_CRC_ENTRIES_NR 128
struct intel_pipe_crc {
spinlock_t lock;
@@ -1294,6 +1306,16 @@ struct intel_crtc {
} active;
} wm;
+ struct {
+ struct mutex mutex;
+ struct delayed_work work;
+ enum drrs_refresh_rate refresh_rate;
+ unsigned int frontbuffer_bits;
+ unsigned int busy_frontbuffer_bits;
+ enum transcoder cpu_transcoder;
+ struct intel_link_m_n m_n, m2_n2;
+ } drrs;
+
int scanline_offset;
struct {
@@ -1503,6 +1525,7 @@ struct intel_psr {
bool colorimetry_support;
bool psr2_enabled;
bool psr2_sel_fetch_enabled;
+ bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 1b774dcfb281..a171d42a5c5b 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -28,6 +28,7 @@
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
+#include "intel_dmc_regs.h"
/**
* DOC: DMC Firmware Support
@@ -37,6 +38,10 @@
* low-power state and comes back to normal.
*/
+#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
+#define DMC_VERSION_MAJOR(version) ((version) >> 16)
+#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
+
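As a quick illustration of the version encoding above (a sketch for this page, not part of the patch): the major number occupies the upper 16 bits and the minor the lower 16, so for the ADL-P v2.16 firmware referenced below:

    u32 v = DMC_VERSION(2, 16);  /* (2 << 16) | 16 == 0x00020010 */
    DMC_VERSION_MAJOR(v);        /* 2  */
    DMC_VERSION_MINOR(v);        /* 16 */
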
#define DMC_PATH(platform, major, minor) \
"i915/" \
__stringify(platform) "_dmc_ver" \
@@ -47,8 +52,8 @@
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 14)
-#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 14)
+#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
+#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
@@ -276,17 +281,8 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
struct intel_dmc *dmc = &dev_priv->dmc;
u32 id, i;
- if (!HAS_DMC(dev_priv)) {
- drm_err(&dev_priv->drm,
- "No DMC support available for this platform\n");
+ if (!intel_dmc_has_payload(dev_priv))
return;
- }
-
- if (!dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload) {
- drm_err(&dev_priv->drm,
- "Tried to program CSR with empty payload\n");
- return;
- }
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
@@ -314,6 +310,17 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
gen9_set_dc_state_debugmask(dev_priv);
}
+void assert_dmc_loaded(struct drm_i915_private *i915)
+{
+ drm_WARN_ONCE(&i915->drm,
+ !intel_de_read(i915, DMC_PROGRAM(i915->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ "DMC program storage start is NULL\n");
+ drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
+ "DMC SSP Base Not fine\n");
+ drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
+ "DMC HTP Not fine\n");
+}
+
static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
const struct stepping_info *si)
{
@@ -741,7 +748,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
dmc->fw_path = RKL_DMC_PATH;
dmc->required_version = RKL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (IS_TIGERLAKE(dev_priv)) {
dmc->fw_path = TGL_DMC_PATH;
dmc->required_version = TGL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
@@ -852,3 +859,101 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
for (id = 0; id < DMC_FW_MAX; id++)
kfree(dev_priv->dmc.dmc_info[id].payload);
}
+
+void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_i915_private *i915)
+{
+ struct intel_dmc *dmc = &i915->dmc;
+
+ if (!HAS_DMC(i915))
+ return;
+
+ i915_error_printf(m, "DMC loaded: %s\n",
+ str_yes_no(intel_dmc_has_payload(i915)));
+ i915_error_printf(m, "DMC fw version: %d.%d\n",
+ DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
+}
+
+static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ intel_wakeref_t wakeref;
+ struct intel_dmc *dmc;
+ i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
+
+ if (!HAS_DMC(i915))
+ return -ENODEV;
+
+ dmc = &i915->dmc;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ seq_printf(m, "fw loaded: %s\n",
+ str_yes_no(intel_dmc_has_payload(i915)));
+ seq_printf(m, "path: %s\n", dmc->fw_path);
+ seq_printf(m, "Pipe A fw support: %s\n",
+ str_yes_no(GRAPHICS_VER(i915) >= 12));
+ seq_printf(m, "Pipe A fw loaded: %s\n",
+ str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
+ seq_printf(m, "Pipe B fw support: %s\n",
+ str_yes_no(IS_ALDERLAKE_P(i915)));
+ seq_printf(m, "Pipe B fw loaded: %s\n",
+ str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));
+
+ if (!intel_dmc_has_payload(i915))
+ goto out;
+
+ seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
+
+ if (DISPLAY_VER(i915) >= 12) {
+ if (IS_DGFX(i915)) {
+ dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
+ } else {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ }
+
+ /*
+ * NOTE: DMC_DEBUG3 is a general purpose register.
+ * According to Bspec 49196, the DMC firmware reuses the DC5/6
+ * counter register for DC3CO debugging and validation, but the
+ * TGL DMC firmware uses the DMC_DEBUG3 register for the DC3CO counter.
+ */
+ seq_printf(m, "DC3CO count: %d\n",
+ intel_de_read(i915, IS_DGFX(i915) ?
+ DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
+ } else {
+ dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
+ SKL_DMC_DC3_DC5_COUNT;
+ if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
+ dc6_reg = SKL_DMC_DC5_DC6_COUNT;
+ }
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
+ if (i915_mmio_reg_valid(dc6_reg))
+ seq_printf(m, "DC5 -> DC6 count: %d\n",
+ intel_de_read(i915, dc6_reg));
+
+out:
+ seq_printf(m, "program base: 0x%08x\n",
+ intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
+ seq_printf(m, "ssp base: 0x%08x\n",
+ intel_de_read(i915, DMC_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);
+
+void intel_dmc_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
+ i915, &intel_dmc_debugfs_status_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 7c590309a3a9..41091aee3b47 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -10,12 +10,9 @@
#include "intel_wakeref.h"
#include <linux/workqueue.h>
+struct drm_i915_error_state_buf;
struct drm_i915_private;
-#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
-#define DMC_VERSION_MAJOR(version) ((version) >> 16)
-#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
-
enum {
DMC_FW_MAIN = 0,
DMC_FW_PIPEA,
@@ -54,5 +51,10 @@ void intel_dmc_ucode_fini(struct drm_i915_private *i915);
void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
void intel_dmc_ucode_resume(struct drm_i915_private *i915);
bool intel_dmc_has_payload(struct drm_i915_private *i915);
+void intel_dmc_debugfs_register(struct drm_i915_private *i915);
+void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_i915_private *i915);
+
+void assert_dmc_loaded(struct drm_i915_private *i915);
#endif /* __INTEL_DMC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
new file mode 100644
index 000000000000..7853827988d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DMC_REGS_H__
+#define __INTEL_DMC_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
+#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define DMC_HTP_ADDR_SKL 0x00500034
+#define DMC_SSP_BASE _MMIO(0x8F074)
+#define DMC_HTP_SKL _MMIO(0x8F004)
+#define DMC_LAST_WRITE _MMIO(0x8F034)
+#define DMC_LAST_WRITE_VALUE 0xc003b400
+#define DMC_MMIO_START_RANGE 0x80000
+#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_V1_MMIO_START_RANGE 0x80000
+#define TGL_MAIN_MMIO_START 0x8F000
+#define TGL_MAIN_MMIO_END 0x8FFFF
+#define _TGL_PIPEA_MMIO_START 0x92000
+#define _TGL_PIPEA_MMIO_END 0x93FFF
+#define _TGL_PIPEB_MMIO_START 0x96000
+#define _TGL_PIPEB_MMIO_END 0x97FFF
+#define ADLP_PIPE_MMIO_START 0x5F000
+#define ADLP_PIPE_MMIO_END 0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+ _TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+ _TGL_PIPEB_MMIO_END)
+
+#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
+#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
+#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
+#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
+#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
+#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
+
+#define TGL_DMC_DEBUG3 _MMIO(0x101090)
+#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
+
+#endif /* __INTEL_DMC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f868db8be02a..e4a79c11fd25 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -29,15 +29,17 @@
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <asm/byteorder.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/dp/drm_dp_helper.h>
-#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
@@ -59,7 +61,6 @@
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
-#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -67,6 +68,7 @@
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
+#include "intel_pch_display.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_tc.h"
@@ -386,23 +388,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
-static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
-{
- u32 voltage;
-
- voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
-
- return voltage == VOLTAGE_INFO_0_85V;
-}
-
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_phy_is_combo(dev_priv, phy) &&
- (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
+ if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -410,23 +402,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
- return 540000;
-
- return 810000;
-}
-
-static int dg1_max_source_rate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+ if (intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -469,7 +445,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
max_rate = dg2_max_source_rate(intel_dp);
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- max_rate = dg1_max_source_rate(intel_dp);
+ max_rate = 810000;
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
@@ -580,8 +556,9 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int link_rate,
u8 lane_count)
{
+ /* FIXME figure out what we actually want here */
const struct drm_display_mode *fixed_mode =
- intel_dp->attached_connector->panel.fixed_mode;
+ intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
int mode_rate, max_rate;
mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
@@ -783,14 +760,12 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
}
static enum intel_output_format
-intel_dp_output_format(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+intel_dp_output_format(struct intel_connector *connector,
+ bool ycbcr_420_output)
{
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
- const struct drm_display_info *info = &connector->display_info;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
- if (!connector->ycbcr_420_allowed ||
- !drm_mode_is_420_only(info, mode))
+ if (!connector->base.ycbcr_420_allowed || !ycbcr_420_output)
return INTEL_OUTPUT_FORMAT_RGB;
if (intel_dp->dfp.rgb_to_ycbcr &&
@@ -825,11 +800,12 @@ static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
}
static int
-intel_dp_mode_min_output_bpp(struct drm_connector *connector,
+intel_dp_mode_min_output_bpp(struct intel_connector *connector,
const struct drm_display_mode *mode)
{
+ const struct drm_display_info *info = &connector->base.display_info;
enum intel_output_format output_format =
- intel_dp_output_format(connector, mode);
+ intel_dp_output_format(connector, drm_mode_is_420_only(info, mode));
return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}
@@ -853,6 +829,43 @@ static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
return hdisplay == 4096 && !HAS_DDI(dev_priv);
}
+static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int max_tmds_clock = intel_dp->dfp.max_tmds_clock;
+
+ /* Only consider the sink's max TMDS clock if we know this is an HDMI DFP */
+ if (max_tmds_clock && info->max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+
+ return max_tmds_clock;
+}
+
+static enum drm_mode_status
+intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
+ int clock, int bpc, bool ycbcr420_output,
+ bool respect_downstream_limits)
+{
+ int tmds_clock, min_tmds_clock, max_tmds_clock;
+
+ if (!respect_downstream_limits)
+ return MODE_OK;
+
+ tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+
+ min_tmds_clock = intel_dp->dfp.min_tmds_clock;
+ max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);
+
+ if (min_tmds_clock && tmds_clock < min_tmds_clock)
+ return MODE_CLOCK_LOW;
+
+ if (max_tmds_clock && tmds_clock > max_tmds_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
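
The check above relies on intel_hdmi_tmds_clock() to convert a pixel clock into a TMDS clock. A rough sketch of that conversion, assuming the usual HDMI rules (the authoritative version lives in intel_hdmi.c):

    static int tmds_clock_sketch(int clock, int bpc, bool ycbcr420_output)
    {
            /* YCbCr 4:2:0 carries half the pixel rate on the TMDS link */
            if (ycbcr420_output)
                    clock /= 2;

            /* deep color scales the clock by bpc/8, e.g. 12bpc -> 1.5x */
            return DIV_ROUND_CLOSEST(clock * bpc, 8);
    }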
+
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
const struct drm_display_mode *mode,
@@ -860,13 +873,14 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
const struct drm_display_info *info = &connector->base.display_info;
- int tmds_clock;
+ enum drm_mode_status status;
+ bool ycbcr_420_only;
/* If PCON supports FRL MODE, check FRL bandwidth constraints */
if (intel_dp->dfp.pcon_max_frl_bw) {
int target_bw;
int max_frl_bw;
- int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
+ int bpp = intel_dp_mode_min_output_bpp(connector, mode);
target_bw = bpp * target_clock;
@@ -885,16 +899,23 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
target_clock > intel_dp->dfp.max_dotclock)
return MODE_CLOCK_HIGH;
+ ycbcr_420_only = drm_mode_is_420_only(info, mode);
+
/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
- tmds_clock = intel_hdmi_tmds_clock(target_clock, 8,
- drm_mode_is_420_only(info, mode));
+ status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
+ 8, ycbcr_420_only, true);
- if (intel_dp->dfp.min_tmds_clock &&
- tmds_clock < intel_dp->dfp.min_tmds_clock)
- return MODE_CLOCK_LOW;
- if (intel_dp->dfp.max_tmds_clock &&
- tmds_clock > intel_dp->dfp.max_tmds_clock)
- return MODE_CLOCK_HIGH;
+ if (status != MODE_OK) {
+ if (ycbcr_420_only ||
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(info, mode))
+ return status;
+
+ status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
+ 8, true, true);
+ if (status != MODE_OK)
+ return status;
+ }
return MODE_OK;
}
@@ -911,13 +932,13 @@ static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
}
static enum drm_mode_status
-intel_dp_mode_valid(struct drm_connector *connector,
+intel_dp_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk = dev_priv->max_dotclk_freq;
@@ -932,8 +953,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
+ fixed_mode = intel_panel_fixed_mode(connector, mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
- status = intel_panel_mode_valid(intel_connector, mode);
+ status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
@@ -1007,8 +1029,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode_rate > max_rate && !dsc)
return MODE_CLOCK_HIGH;
- status = intel_dp_mode_valid_downstream(intel_connector,
- mode, target_clock);
+ status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
if (status != MODE_OK)
return status;
@@ -1130,44 +1151,50 @@ static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
-static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static bool intel_dp_is_ycbcr420(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
intel_dp->dfp.ycbcr_444_to_420);
}
-static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state, int bpc)
+static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int bpc, bool respect_downstream_limits)
{
+ bool ycbcr420_output = intel_dp_is_ycbcr420(intel_dp, crtc_state);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
- int tmds_clock = intel_hdmi_tmds_clock(clock, bpc,
- intel_dp_hdmi_ycbcr420(intel_dp, crtc_state));
-
- if (intel_dp->dfp.min_tmds_clock &&
- tmds_clock < intel_dp->dfp.min_tmds_clock)
- return false;
- if (intel_dp->dfp.max_tmds_clock &&
- tmds_clock > intel_dp->dfp.max_tmds_clock)
- return false;
+ /*
+ * The current bpc could already be below 8bpc due to FDI
+ * bandwidth constraints or other limits; the HDMI minimum,
+ * however, is 8bpc.
+ */
+ bpc = max(bpc, 8);
- return true;
-}
+ /*
+ * We will never exceed downstream TMDS clock limits while
+ * attempting deep color. If the user insists on forcing an
+ * out-of-spec mode, they will have to be satisfied with 8bpc.
+ */
+ if (!respect_downstream_limits)
+ bpc = 8;
-static bool intel_dp_hdmi_bpc_possible(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- int bpc)
-{
+ for (; bpc >= 8; bpc -= 2) {
+ if (intel_hdmi_bpc_possible(crtc_state, bpc,
+ intel_dp->has_hdmi_sink, ycbcr420_output) &&
+ intel_dp_tmds_clock_valid(intel_dp, clock, bpc, ycbcr420_output,
+ respect_downstream_limits) == MODE_OK)
+ return bpc;
+ }
- return intel_hdmi_bpc_possible(crtc_state, bpc, intel_dp->has_hdmi_sink,
- intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
- intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
+ return -EINVAL;
}
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ bool respect_downstream_limits)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -1179,10 +1206,14 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
if (intel_dp->dfp.min_tmds_clock) {
- for (; bpc >= 10; bpc -= 2) {
- if (intel_dp_hdmi_bpc_possible(intel_dp, crtc_state, bpc))
- break;
- }
+ int max_hdmi_bpc;
+
+ max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
+ respect_downstream_limits);
+ if (max_hdmi_bpc < 0)
+ return 0;
+
+ bpc = min(bpc, max_hdmi_bpc);
}
bpp = bpc * 3;
@@ -1424,13 +1455,13 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
- pipe_config->bigjoiner,
+ pipe_config->bigjoiner_pipes,
pipe_bpp);
dsc_dp_slice_count =
intel_dp_dsc_get_slice_count(intel_dp,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
- pipe_config->bigjoiner);
+ pipe_config->bigjoiner_pipes);
if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
drm_dbg_kms(&dev_priv->drm,
"Compressed BPP/Slice Count not supported\n");
@@ -1464,7 +1495,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* then we need to use 2 VDSC instances.
*/
if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
- pipe_config->bigjoiner) {
+ pipe_config->bigjoiner_pipes) {
if (pipe_config->dsc.slice_count < 2) {
drm_dbg_kms(&dev_priv->drm,
"Cannot split stream to use 2 VDSC instances\n");
@@ -1497,13 +1528,16 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
+ bool joiner_needs_dsc = false;
int ret;
limits.min_rate = intel_dp_common_rate(intel_dp, 0);
@@ -1513,7 +1547,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
- limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
+ limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config, respect_downstream_limits);
if (intel_dp->use_max_params) {
/*
@@ -1537,7 +1571,14 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_clock))
- pipe_config->bigjoiner = true;
+ pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
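
For reference, the mask set here always spans the master pipe and the pipe immediately after it; e.g. with crtc->pipe == PIPE_A (0):

    /* GENMASK(1, 0) == 0x3 == BIT(PIPE_A) | BIT(PIPE_B):
     * pipe A is the master, pipe B acts as its bigjoiner slave.
     */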
+
+ /*
+ * The pipe joiner needs compression up to display version 12 due
+ * to a bandwidth limitation. From DG2 onwards the pipe joiner can
+ * be enabled without compression.
+ */
+ joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;
/*
* Optimize for slow and wide for everything, because there are some
@@ -1545,13 +1586,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
*/
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
- /*
- * Pipe joiner needs compression upto display12 due to BW limitation. DG2
- * onwards pipe joiner can be enabled without compression.
- */
- drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
- if (ret || intel_dp->force_dsc_en || (DISPLAY_VER(i915) < 13 &&
- pipe_config->bigjoiner)) {
+ if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) {
+ drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ str_yes_no(ret), str_yes_no(joiner_needs_dsc),
+ str_yes_no(intel_dp->force_dsc_en));
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits);
if (ret < 0)
@@ -1786,6 +1824,137 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
+static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ /* M1/N1 is double buffered */
+ if (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
+ return true;
+
+ return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+}
+
+static bool can_enable_drrs(struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_display_mode *downclock_mode)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (pipe_config->vrr.enable)
+ return false;
+
+ /*
+ * DRRS and PSR can't be enabled together, so give preference to PSR,
+ * as it allows more power savings by shutting the display down
+ * completely. To guarantee this, intel_drrs_compute_config() must be
+ * called after intel_psr_compute_config().
+ */
+ if (pipe_config->has_psr)
+ return false;
+
+ /* FIXME missing FDI M2/N2 etc. */
+ if (pipe_config->has_pch_encoder)
+ return false;
+
+ if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+ return false;
+
+ return downclock_mode &&
+ intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
+}
+
+static void
+intel_dp_drrs_compute_config(struct intel_connector *connector,
+ struct intel_crtc_state *pipe_config,
+ int output_bpp, bool constant_n)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *downclock_mode =
+ intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
+ int pixel_clock;
+
+ if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
+ if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
+ intel_zero_m_n(&pipe_config->dp_m2_n2);
+ return;
+ }
+
+ if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
+ pipe_config->msa_timing_delay = i915->vbt.edp.drrs_msa_timing_delay;
+
+ pipe_config->has_drrs = true;
+
+ pixel_clock = downclock_mode->clock;
+ if (pipe_config->splitter.enable)
+ pixel_clock /= pipe_config->splitter.link_count;
+
+ intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
+ pipe_config->port_clock, &pipe_config->dp_m2_n2,
+ constant_n, pipe_config->fec_enable);
+
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
+}
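
The M2/N2 pair programmed here encodes the same data-rate/link-rate ratio as the primary M/N values, only for the lower downclock pixel rate. A simplified sketch of the ratio that intel_link_compute_m_n() computes (rounding and the constant_n quirk omitted):

    static void link_m_n_sketch(int bpp, int nlanes, int pixel_khz,
                                int link_khz, u32 *m, u32 *n)
    {
            *m = bpp * pixel_khz;        /* data rate */
            *n = link_khz * nlanes * 8;  /* link capacity */
    }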
+
+static bool intel_dp_has_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+
+ if (!intel_dp_port_has_audio(i915, encoder->port))
+ return false;
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return intel_dp->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
+static int
+intel_dp_compute_output_format(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ bool ycbcr_420_only;
+ int ret;
+
+ ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
+
+ crtc_state->output_format = intel_dp_output_format(connector, ycbcr_420_only);
+
+ if (ycbcr_420_only && !intel_dp_is_ycbcr420(intel_dp, crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ }
+
+ ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
+ respect_downstream_limits);
+ if (ret) {
+ if (intel_dp_is_ycbcr420(intel_dp, crtc_state) ||
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(info, adjusted_mode))
+ return ret;
+
+ crtc_state->output_format = intel_dp_output_format(connector, true);
+ ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
+ respect_downstream_limits);
+ }
+
+ return ret;
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -1794,38 +1963,19 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- enum port port = encoder->port;
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct intel_digital_connector_state *intel_conn_state =
- to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *fixed_mode;
+ struct intel_connector *connector = intel_dp->attached_connector;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
pipe_config->has_pch_encoder = true;
- pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
- adjusted_mode);
-
- if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
- ret = intel_panel_fitting(pipe_config, conn_state);
- if (ret)
- return ret;
- }
-
- if (!intel_dp_port_has_audio(dev_priv, port))
- pipe_config->has_audio = false;
- else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio = intel_dp->has_audio;
- else
- pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
-
- if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
- ret = intel_panel_compute_config(intel_connector, adjusted_mode);
- if (ret)
- return ret;
+ pipe_config->has_audio = intel_dp_has_audio(encoder, pipe_config, conn_state);
- ret = intel_panel_fitting(pipe_config, conn_state);
+ fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
+ if (intel_dp_is_edp(intel_dp) && fixed_mode) {
+ ret = intel_panel_compute_config(connector, adjusted_mode);
if (ret)
return ret;
}
@@ -1843,10 +1993,23 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
return -EINVAL;
- ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
- if (ret < 0)
+ /*
+ * Try to respect downstream TMDS clock limits first, if
+ * that fails assume the user might know something we don't.
+ */
+ ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
+ if (ret)
+ ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
+ if (ret)
return ret;
+ if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ ret = intel_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
+ }
+
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
@@ -1892,8 +2055,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
- intel_drrs_compute_config(intel_dp, pipe_config, output_bpp,
- constant_n);
+ intel_dp_drrs_compute_config(connector, pipe_config,
+ output_bpp, constant_n);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -1976,7 +2139,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
if (ret < 0)
drm_dbg_kms(&i915->drm,
"Failed to %s sink decompression state\n",
- enabledisable(enable));
+ str_enable_disable(enable));
}
static void
@@ -2452,7 +2615,7 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
- enabledisable(intel_dp->has_hdmi_sink));
+ str_enable_disable(intel_dp->has_hdmi_sink));
tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
@@ -2461,45 +2624,15 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
drm_dbg_kms(&i915->drm,
"Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
- enabledisable(intel_dp->dfp.ycbcr_444_to_420));
+ str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
- tmp = 0;
- if (intel_dp->dfp.rgb_to_ycbcr) {
- bool bt2020, bt709;
-
- /*
- * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
- * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default.
- *
- */
- tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
-
- bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
- intel_dp->downstream_ports,
- DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
- bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
- intel_dp->downstream_ports,
- DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
- switch (crtc_state->infoframes.vsc.colorimetry) {
- case DP_COLORIMETRY_BT2020_RGB:
- case DP_COLORIMETRY_BT2020_YCC:
- if (bt2020)
- tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
- break;
- case DP_COLORIMETRY_BT709_YCC:
- case DP_COLORIMETRY_XVYCC_709:
- if (bt709)
- tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
- break;
- default:
- break;
- }
- }
+ tmp = intel_dp->dfp.rgb_to_ycbcr ?
+ DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
drm_dbg_kms(&i915->drm,
"Failed to %s protocol converter RGB->YCbCr conversion mode\n",
- enabledisable(tmp));
+ str_enable_disable(tmp));
}
@@ -2572,9 +2705,9 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
drm_mode_set_name(mode);
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] using generated MSO mode: ",
- connector->base.base.id, connector->base.name);
- drm_mode_debug_printmodeline(mode);
+ "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ DRM_MODE_ARG(mode));
}
static void intel_edp_mso_init(struct intel_dp *intel_dp)
@@ -2787,8 +2920,9 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
encoder->base.base.id, encoder->base.name,
- yesno(intel_dp_mst_source_support(intel_dp)), yesno(sink_can_mst),
- yesno(i915->params.enable_dp_mst));
+ str_yes_no(intel_dp_mst_source_support(intel_dp)),
+ str_yes_no(sink_can_mst),
+ str_yes_no(i915->params.enable_dp_mst));
if (!intel_dp_mst_source_support(intel_dp))
return;
@@ -4347,9 +4481,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
intel_dp->downstream_ports);
rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
intel_dp->downstream_ports,
- DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
- DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
- DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
if (DISPLAY_VER(i915) >= 11) {
/* Let PCON convert from RGB->YCbCr if possible */
@@ -4375,9 +4507,9 @@ intel_dp_update_420(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
connector->base.base.id, connector->base.name,
- yesno(intel_dp->dfp.rgb_to_ycbcr),
- yesno(connector->base.ycbcr_420_allowed),
- yesno(intel_dp->dfp.ycbcr_444_to_420));
+ str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
+ str_yes_no(connector->base.ycbcr_420_allowed),
+ str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
}
static void
@@ -4586,17 +4718,8 @@ static int intel_dp_get_modes(struct drm_connector *connector)
num_modes = intel_connector_update_modes(connector, edid);
/* Also add fixed mode, which may or may not be present in EDID */
- if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
- intel_connector->panel.fixed_mode) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev,
- intel_connector->panel.fixed_mode);
- if (mode) {
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
- }
+ if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
+ num_modes += intel_panel_get_modes(intel_connector);
if (num_modes)
return num_modes;
@@ -4648,9 +4771,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (lspcon_init(dig_port)) {
lspcon_detect_hdr_capability(lspcon);
if (lspcon->hdr_supported)
- drm_object_attach_property(&connector->base,
- connector->dev->mode_config.hdr_output_metadata_property,
- 0);
+ drm_connector_attach_hdr_output_metadata_property(connector);
}
return ret;
@@ -4919,6 +5040,25 @@ bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return intel_bios_is_port_edp(dev_priv, port);
}
+static bool
+has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port)
+{
+ if (intel_bios_is_lspcon_present(i915, port))
+ return false;
+
+ if (DISPLAY_VER(i915) >= 11)
+ return true;
+
+ if (port == PORT_A)
+ return false;
+
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+ DISPLAY_VER(i915) >= 9)
+ return true;
+
+ return false;
+}
+
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
@@ -4945,10 +5085,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_dp_colorspace_property(connector);
}
- if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11)
- drm_object_attach_property(&connector->base,
- connector->dev->mode_config.hdr_output_metadata_property,
- 0);
+ if (has_gamut_metadata_dip(dev_priv, port))
+ drm_connector_attach_hdr_output_metadata_property(connector);
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
@@ -4967,14 +5105,30 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
drm_connector_attach_vrr_capable_property(connector);
}
+static void
+intel_edp_add_properties(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(connector);
+
+ if (!fixed_mode)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ i915->vbt.orientation,
+ fixed_mode->hdisplay,
+ fixed_mode->vdisplay);
+}
+
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector = &intel_connector->base;
- struct drm_display_mode *fixed_mode = NULL;
- struct drm_display_mode *downclock_mode = NULL;
+ struct drm_display_mode *fixed_mode;
bool has_dpcd;
enum pipe pipe = INVALID_PIPE;
struct edid *edid;
@@ -5031,20 +5185,20 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
- fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
- if (fixed_mode)
- downclock_mode = intel_drrs_init(intel_connector, fixed_mode);
+ intel_panel_add_edid_fixed_modes(intel_connector,
+ dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
/* MSO requires information from the EDID */
intel_edp_mso_init(intel_dp);
/* multiply the mode clock and horizontal timings for MSO */
- intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
- intel_edp_mso_mode_fixup(intel_connector, downclock_mode);
+ list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
+ intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
/* fallback to VBT if available for eDP */
- if (!fixed_mode)
- fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+
mutex_unlock(&dev->mode_config.mutex);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -5066,16 +5220,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pipe_name(pipe));
}
- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+ intel_panel_init(intel_connector);
+
if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_backlight_setup(intel_connector, pipe);
- if (fixed_mode) {
- drm_connector_set_panel_orientation_with_quirk(connector,
- dev_priv->vbt.orientation,
- fixed_mode->hdisplay, fixed_mode->vdisplay);
- }
+ intel_edp_add_properties(intel_dp);
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 82d024dafe7b..a7640dbcf00e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -6,9 +6,9 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#include <drm/dp/drm_dp_helper.h>
-#include <drm/dp/drm_dp_mst_helper.h>
-#include <drm/drm_hdcp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_print.h>
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 5d98773efd1b..9feaf1a589f3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -55,6 +55,7 @@ static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
}
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
enum drm_dp_phy dp_phy)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@@ -63,7 +64,7 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
- if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
+ if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
encoder->base.base.id, encoder->base.name, phy_name);
@@ -77,23 +78,15 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
phy_caps);
}
-static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
+static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
- if (intel_dp_is_edp(intel_dp))
- return false;
-
- /*
- * Detecting LTTPRs must be avoided on platforms with an AUX timeout
- * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
- */
- if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
- return false;
+ int ret;
- if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
- intel_dp->lttpr_common_caps) < 0)
+ ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
+ intel_dp->lttpr_common_caps);
+ if (ret < 0)
goto reset_caps;
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
@@ -122,14 +115,14 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}
-static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int lttpr_count;
int i;
- if (!intel_dp_read_lttpr_common_caps(intel_dp))
+ if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
return 0;
lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
@@ -168,7 +161,7 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
}
for (i = 0; i < lttpr_count; i++)
- intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));
+ intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
return lttpr_count;
}
@@ -193,9 +186,30 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
*/
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
- int lttpr_count = intel_dp_init_lttpr(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int lttpr_count = 0;
- /* The DPTX shall read the DPRX caps after LTTPR detection. */
+ /*
+ * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+ * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+ */
+ if (!intel_dp_is_edp(intel_dp) &&
+ (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
+ return -EIO;
+
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
+ return -EIO;
+
+ lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
+ }
+
+ /*
+ * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
+ * them here.
+ */
if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
intel_dp_reset_lttpr_common_caps(intel_dp);
return -EIO;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index dc1556b46b85..7fa1c0833096 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -6,7 +6,7 @@
#ifndef __INTEL_DP_LINK_TRAINING_H__
#define __INTEL_DP_LINK_TRAINING_H__
-#include <drm/dp/drm_dp_helper.h>
+#include <drm/display/drm_dp_helper.h>
struct intel_crtc_state;
struct intel_dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index e30e698aa684..061b277e5ce7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -398,9 +398,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
if (ret) {
drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
@@ -599,8 +598,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_crtc_vblank_on(pipe_config);
- if (pipe_config->has_audio)
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 44edeb2e55c0..cc6abe761f5e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -24,6 +24,7 @@
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 14f5ffe27d05..6eef0b8a91eb 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
+#include <linux/string_helpers.h>
#include "intel_crtc.h"
#include "intel_de.h"
@@ -17,7 +18,10 @@
#include "vlv_sideband.h"
struct intel_dpll_funcs {
- int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
+ int (*crtc_compute_clock)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
};
struct intel_limit {
@@ -253,12 +257,12 @@ static const struct intel_limit ilk_limits_dual_lvds_100m = {
static const struct intel_limit intel_limits_vlv = {
/*
- * These are the data rate limits (measured in fast clocks)
+ * These are based on the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
* clock and actual rate limits are more relaxed, so checking
* them would make no difference.
*/
- .dot = { .min = 25000 * 5, .max = 270000 * 5 },
+ .dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
.m1 = { .min = 2, .max = 3 },
@@ -269,12 +273,12 @@ static const struct intel_limit intel_limits_vlv = {
static const struct intel_limit intel_limits_chv = {
/*
- * These are the data rate limits (measured in fast clocks)
+ * These are based on the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
* clock and actual rate limits are more relaxed, so checking
* them would make no difference.
*/
- .dot = { .min = 25000 * 5, .max = 540000 * 5},
+ .dot = { .min = 25000, .max = 540000 },
.vco = { .min = 4800000, .max = 6480000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
@@ -284,8 +288,7 @@ static const struct intel_limit intel_limits_chv = {
};
static const struct intel_limit intel_limits_bxt = {
- /* FIXME: find real dot limits */
- .dot = { .min = 0, .max = INT_MAX },
+ .dot = { .min = 25000, .max = 594000 },
.vco = { .min = 4800000, .max = 6700000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
@@ -336,26 +339,26 @@ int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
+ clock->p = clock->p1 * clock->p2 * 5;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
- return clock->dot / 5;
+ return clock->dot;
}
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
+ clock->p = clock->p1 * clock->p2 * 5;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0;
clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
- return clock->dot / 5;
+ return clock->dot;
}
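
Both helpers now fold the constant x5 fast-clock factor into p and return the actual dot clock, matching the de-scaled .dot limits above. A worked example with assumed VLV divisors (refclk = 100000 kHz):

    /*
     *   m   = m1 * m2        = 3 * 17      = 51
     *   vco = refclk * m / n = 100000 * 51 = 5100000 kHz  (n = 1)
     *   p   = p1 * p2 * 5    = 3 * 2 * 5   = 30
     *   dot = vco / p        = 170000 kHz  (170 MHz)
     *
     * Before this change p omitted the factor of 5, so clock->dot held
     * the 850000 kHz fast clock and the helper returned dot / 5.
     */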
/*
@@ -424,8 +427,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
/*
* Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ * refclk, or FALSE.
*
* Target and reference clocks are specified in kHz.
*
@@ -483,8 +485,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
/*
* Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ * refclk, or FALSE.
*
* Target and reference clocks are specified in kHz.
*
@@ -540,8 +541,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
/*
* Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ * refclk, or FALSE.
*
* Target and reference clocks are specified in kHz.
*
@@ -640,8 +640,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
/*
* Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ * refclk, or FALSE.
*/
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
@@ -658,8 +657,6 @@ vlv_find_best_dpll(const struct intel_limit *limit,
int max_n = min(limit->n.max, refclk / 19200);
bool found = false;
- target *= 5; /* fast clock */
-
memset(best_clock, 0, sizeof(*best_clock));
/* based on hardware requirement, prefer smaller n to precision */
@@ -667,7 +664,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- clock.p = clock.p1 * clock.p2;
+ clock.p = clock.p1 * clock.p2 * 5;
/* based on hardware requirement, prefer bigger m1,m2 values */
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
unsigned int ppm;
@@ -701,8 +698,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
/*
* Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ * refclk, or FALSE.
*/
static bool
chv_find_best_dpll(const struct intel_limit *limit,
@@ -728,7 +724,6 @@ chv_find_best_dpll(const struct intel_limit *limit,
*/
clock.n = 1;
clock.m1 = 2;
- target *= 5; /* fast clock */
for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
for (clock.p2 = limit->p2.p2_fast;
@@ -736,7 +731,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
unsigned int error_ppm;
- clock.p = clock.p1 * clock.p2;
+ clock.p = clock.p1 * clock.p2 * 5;
m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
refclk * clock.m1);
@@ -767,8 +762,8 @@ chv_find_best_dpll(const struct intel_limit *limit,
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock)
{
- int refclk = 100000;
const struct intel_limit *limit = &intel_limits_bxt;
+ int refclk = 100000;
return chv_find_best_dpll(limit, crtc_state,
crtc_state->port_clock, refclk,
@@ -935,32 +930,48 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
+ return 0;
+}
+
+static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
-
- if (IS_DG2(dev_priv))
- return intel_mpllb_calc_state(crtc_state, encoder);
+ int ret;
if (DISPLAY_VER(dev_priv) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+ ret = intel_reserve_shared_dplls(state, crtc, encoder);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
- return -EINVAL;
+ return ret;
}
return 0;
}
+static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ return intel_mpllb_calc_state(crtc_state, encoder);
+}
+
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
return dpll->m < factor * dpll->n;
@@ -1076,18 +1087,15 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 120000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
return 0;
@@ -1126,11 +1134,27 @@ static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state)
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
+ return 0;
+}
+
+static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
+
+ ret = intel_reserve_shared_dplls(state, crtc, NULL);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
- return -EINVAL;
+ return ret;
}
return 0;
@@ -1171,14 +1195,14 @@ void chv_compute_dpll(struct intel_crtc_state *crtc_state)
(crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int chv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- int refclk = 100000;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit = &intel_limits_chv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ int refclk = 100000;
if (!crtc_state->clock_set &&
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
@@ -1192,14 +1216,14 @@ static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- int refclk = 100000;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit = &intel_limits_vlv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ int refclk = 100000;
if (!crtc_state->clock_set &&
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
@@ -1213,16 +1237,15 @@ static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 96000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
@@ -1259,16 +1282,15 @@ static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 96000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
@@ -1296,16 +1318,15 @@ static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 96000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
@@ -1333,16 +1354,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 48000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
@@ -1372,12 +1392,18 @@ static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
+static const struct intel_dpll_funcs dg2_dpll_funcs = {
+ .crtc_compute_clock = dg2_crtc_compute_clock,
+};
+
static const struct intel_dpll_funcs hsw_dpll_funcs = {
.crtc_compute_clock = hsw_crtc_compute_clock,
+ .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
};
static const struct intel_dpll_funcs ilk_dpll_funcs = {
.crtc_compute_clock = ilk_crtc_compute_clock,
+ .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
};
static const struct intel_dpll_funcs chv_dpll_funcs = {
@@ -1404,18 +1430,54 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
.crtc_compute_clock = i8xx_crtc_compute_clock,
};
-int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+
+ if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
+ return 0;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ return i915->dpll_funcs->crtc_compute_clock(state, crtc);
+}
+
+int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+
+ if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
+ return 0;
- return i915->dpll_funcs->crtc_compute_clock(crtc_state);
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ if (!i915->dpll_funcs->crtc_get_shared_dpll)
+ return 0;
+
+ return i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
}
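The split above separates divider computation from shared-PLL reservation. A simplified sketch of the intended call order (hypothetical wrapper; the actual call sites live elsewhere in the series, and error handling is condensed):

	/* Sketch only: compute clocks first, resolve PLL sharing later. */
	static int example_crtc_modeset_check(struct intel_atomic_state *state,
					      struct intel_crtc *crtc)
	{
		int ret;

		ret = intel_dpll_crtc_compute_clock(state, crtc);
		if (ret)
			return ret;

		/* once all crtcs are computed, shared PLLs can be reserved */
		return intel_dpll_crtc_get_shared_dpll(state, crtc);
	}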
void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
- if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
+ if (IS_DG2(dev_priv))
+ dev_priv->dpll_funcs = &dg2_dpll_funcs;
+ else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
dev_priv->dpll_funcs = &hsw_dpll_funcs;
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->dpll_funcs = &ilk_dpll_funcs;
@@ -1945,7 +2007,7 @@ static void assert_pll(struct drm_i915_private *dev_priv,
cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
+ str_on_off(state), str_on_off(cur_state));
}
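str_on_off() is the <linux/string_helpers.h> replacement for the driver-local onoff(); a minimal usage sketch (hypothetical logging helper):

	#include <linux/string_helpers.h>

	/* str_on_off(true) returns "on", str_on_off(false) returns "off" */
	static void example_log_pll(struct drm_i915_private *i915, bool enabled)
	{
		drm_dbg_kms(&i915->drm, "PLL is %s\n", str_on_off(enabled));
	}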
void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 69b06a9e473e..bbc30542f29f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -10,12 +10,16 @@
struct dpll;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
-int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state);
+int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 569903d47aea..22f55574a35c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -21,6 +21,8 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/string_helpers.h>
+
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
@@ -88,9 +90,9 @@ struct intel_shared_dpll_funcs {
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
- bool (*get_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
+ int (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void (*put_dplls)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*update_active_dpll)(struct intel_atomic_state *state,
@@ -178,13 +180,14 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state hw_state;
if (drm_WARN(&dev_priv->drm, !pll,
- "asserting DPLL %s with no DPLL\n", onoff(state)))
+ "asserting DPLL %s with no DPLL\n", str_on_off(state)))
return;
cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
- pll->info->name, onoff(state), onoff(cur_state));
+ pll->info->name, str_on_off(state),
+ str_on_off(cur_state));
}
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
@@ -511,9 +514,9 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
udelay(200);
}
-static bool ibx_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int ibx_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -538,7 +541,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
}
if (!pll)
- return false;
+ return -EINVAL;
/* reference the pll */
intel_reference_shared_dpll(state, crtc,
@@ -546,7 +549,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
crtc_state->shared_dpll = pll;
- return true;
+ return 0;
}
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -581,7 +584,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
@@ -832,7 +835,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
{
u64 freq2k;
unsigned p, n2, r2;
- struct hsw_wrpll_rnp best = { 0, 0, 0 };
+ struct hsw_wrpll_rnp best = {};
unsigned budget;
freq2k = clock / 100;
@@ -1057,16 +1060,13 @@ static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
-static bool hsw_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int hsw_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ struct intel_shared_dpll *pll = NULL;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
pll = hsw_ddi_wrpll_get_dpll(state, crtc);
@@ -1074,18 +1074,16 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
pll = hsw_ddi_lcpll_get_dpll(crtc_state);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
pll = hsw_ddi_spll_get_dpll(state, crtc);
- else
- return false;
if (!pll)
- return false;
+ return -EINVAL;
intel_reference_shared_dpll(state, crtc,
pll, &crtc_state->dpll_hw_state);
crtc_state->shared_dpll = pll;
- return true;
+ return 0;
}
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
@@ -1330,13 +1328,6 @@ struct skl_wrpll_context {
unsigned int p; /* chosen divider */
};
-static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
-{
- memset(ctx, 0, sizeof(*ctx));
-
- ctx->min_deviation = U64_MAX;
-}
-
/* DCO freq must be within +1%/-6% of the DCO central freq */
#define SKL_DCO_MAX_PDEVIATION 100
#define SKL_DCO_MAX_NDEVIATION 600
@@ -1497,33 +1488,33 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
-static bool
+static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
- u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
- u64 dco_central_freq[3] = { 8400000000ULL,
- 9000000000ULL,
- 9600000000ULL };
- static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
- 24, 28, 30, 32, 36, 40, 42, 44,
- 48, 52, 54, 56, 60, 64, 66, 68,
- 70, 72, 76, 78, 80, 84, 88, 90,
- 92, 96, 98 };
- static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+ static const u64 dco_central_freq[3] = { 8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL };
+ static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 24, 28, 30, 32, 36, 40, 42, 44,
+ 48, 52, 54, 56, 60, 64, 66, 68,
+ 70, 72, 76, 78, 80, 84, 88, 90,
+ 92, 96, 98 };
+ static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
static const struct {
- const int *list;
+ const u8 *list;
int n_dividers;
} dividers[] = {
{ even_dividers, ARRAY_SIZE(even_dividers) },
{ odd_dividers, ARRAY_SIZE(odd_dividers) },
};
- struct skl_wrpll_context ctx;
+ struct skl_wrpll_context ctx = {
+ .min_deviation = U64_MAX,
+ };
unsigned int dco, d, i;
unsigned int p0, p1, p2;
-
- skl_wrpll_context_init(&ctx);
+ u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
@@ -1556,7 +1547,7 @@ skip_remaining_dividers:
if (!ctx.p) {
DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
- return false;
+ return -EINVAL;
}
/*
@@ -1568,14 +1559,15 @@ skip_remaining_dividers:
skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
ctx.central_freq, p0, p1, p2);
- return true;
+ return 0;
}
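The search above only accepts a DCO within +1%/-6% of a central frequency (SKL_DCO_MAX_PDEVIATION/SKL_DCO_MAX_NDEVIATION, in 0.01% units). Illustrative arithmetic under those constants: a 594 MHz pixel clock gives afe = 2.97 GHz, and divider 3 lands the DCO at 8.91 GHz, 1% under the 9.0 GHz central frequency, so it qualifies. A standalone model of the window check (hypothetical helper):

	/* Illustrative model of the deviation window, in units of 0.01%. */
	static bool example_dco_in_window(u64 dco, u64 central)
	{
		u64 dev = div64_u64((dco > central ? dco - central :
				     central - dco) * 10000, central);

		return dco > central ? dev <= 100   /* +1% above central */
				     : dev <= 600;  /* -6% below central */
	}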
-static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wrpll_params wrpll_params = {};
u32 ctrl1, cfgcr1, cfgcr2;
- struct skl_wrpll_params wrpll_params = { 0, };
+ int ret;
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
@@ -1585,10 +1577,10 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
- if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
- i915->dpll.ref_clks.nssc,
- &wrpll_params))
- return false;
+ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->dpll.ref_clks.nssc, &wrpll_params);
+ if (ret)
+ return ret;
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -1600,13 +1592,11 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
crtc_state->dpll_hw_state.ctrl1 = ctrl1;
crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
- return true;
+
+ return 0;
}
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
@@ -1680,7 +1670,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
return dco_freq / (p0 * p1 * p2 * 5);
}
-static bool
+static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
u32 ctrl1;
@@ -1712,12 +1702,9 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
break;
}
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- return true;
+ return 0;
}
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
@@ -1754,33 +1741,23 @@ static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
-static bool skl_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int skl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
- bool bret;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- bret = skl_ddi_hdmi_pll_dividers(crtc_state);
- if (!bret) {
- drm_dbg_kms(&i915->drm,
- "Could not get HDMI pll dividers.\n");
- return false;
- }
- } else if (intel_crtc_has_dp_encoder(crtc_state)) {
- bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
- if (!bret) {
- drm_dbg_kms(&i915->drm,
- "Could not set DP dpll HW state.\n");
- return false;
- }
- } else {
- return false;
- }
+ int ret;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ ret = skl_ddi_hdmi_pll_dividers(crtc_state);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ ret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
+ else
+ ret = -EINVAL;
+ if (ret)
+ return ret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(state, crtc,
@@ -1793,14 +1770,14 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_SKL_DPLL2) |
BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
- return false;
+ return -EINVAL;
intel_reference_shared_dpll(state, crtc,
pll, &crtc_state->dpll_hw_state);
crtc_state->shared_dpll = pll;
- return true;
+ return 0;
}
static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
@@ -1902,7 +1879,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
/* Write M2 integer */
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
- temp &= ~PORT_PLL_M2_MASK;
+ temp &= ~PORT_PLL_M2_INT_MASK;
temp |= pll->state.hw_state.pll0;
intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
@@ -2038,7 +2015,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
- hw_state->pll0 &= PORT_PLL_M2_MASK;
+ hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
@@ -2087,82 +2064,64 @@ out:
return ret;
}
-/* bxt clock parameters */
-struct bxt_clk_div {
- int clock;
- u32 p1;
- u32 p2;
- u32 m2_int;
- u32 m2_frac;
- bool m2_frac_en;
- u32 n;
-
- int vco;
-};
-
/* pre-calculated values for DP linkrates */
-static const struct bxt_clk_div bxt_dp_clk_val[] = {
- {162000, 4, 2, 32, 1677722, 1, 1},
- {270000, 4, 1, 27, 0, 0, 1},
- {540000, 2, 1, 27, 0, 0, 1},
- {216000, 3, 2, 32, 1677722, 1, 1},
- {243000, 4, 1, 24, 1258291, 1, 1},
- {324000, 4, 1, 32, 1677722, 1, 1},
- {432000, 3, 1, 32, 1677722, 1, 1}
+static const struct dpll bxt_dp_clk_val[] = {
+ /* m2 is .22 binary fixed point */
+ { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+ { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
+ { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
+ { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+ { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
+ { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+ { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
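The .22 fixed-point encoding in the new table can be checked by hand: the integer part of m2 sits above bit 22 and the fraction fills the low 22 bits, so 32.4 becomes (32 << 22) + round(0.4 * 2^22) = 0x8000000 + 0x19999a = 0x819999a. A hypothetical helper showing the conversion (fraction given in millionths; integer part assumed < 1024):

	/* Hypothetical: encode int.frac as .22 binary fixed point. */
	static u32 example_m2_fixed22(u32 int_part, u32 frac_millionths)
	{
		return (int_part << 22) +
			DIV_ROUND_CLOSEST_ULL((u64)frac_millionths << 22,
					      1000000);
	}

For instance, example_m2_fixed22(27, 0) yields 0x6c00000, matching the 270000 kHz entry above.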
-static bool
+static int
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
- struct bxt_clk_div *clk_div)
+ struct dpll *clk_div)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct dpll best_clock;
/* Calculate HDMI div */
/*
* FIXME: tie the following calculation into
* i9xx_crtc_compute_clock
*/
- if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
+ if (!bxt_find_best_dpll(crtc_state, clk_div)) {
drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
crtc_state->port_clock,
pipe_name(crtc->pipe));
- return false;
+ return -EINVAL;
}
- clk_div->p1 = best_clock.p1;
- clk_div->p2 = best_clock.p2;
- drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
- clk_div->n = best_clock.n;
- clk_div->m2_int = best_clock.m2 >> 22;
- clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
- clk_div->m2_frac_en = clk_div->m2_frac != 0;
+ drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
- clk_div->vco = best_clock.vco;
-
- return true;
+ return 0;
}
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
- struct bxt_clk_div *clk_div)
+ struct dpll *clk_div)
{
- int clock = crtc_state->port_clock;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
int i;
*clk_div = bxt_dp_clk_val[0];
for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
- if (bxt_dp_clk_val[i].clock == clock) {
+ if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
*clk_div = bxt_dp_clk_val[i];
break;
}
}
- clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
+ chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div);
+
+ drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
+ clk_div->dot != crtc_state->port_clock);
}
-static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
- const struct bxt_clk_div *clk_div)
+static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
+ const struct dpll *clk_div)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
@@ -2171,8 +2130,6 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
u32 lanestagger;
- memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
-
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
@@ -2191,7 +2148,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
targ_cnt = 9;
} else {
drm_err(&i915->drm, "Invalid VCO\n");
- return false;
+ return -EINVAL;
}
if (clock > 270000)
@@ -2206,45 +2163,45 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
lanestagger = 0x02;
dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
- dpll_hw_state->pll0 = clk_div->m2_int;
+ dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
- dpll_hw_state->pll2 = clk_div->m2_frac;
+ dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
- if (clk_div->m2_frac_en)
+ if (clk_div->m2 & 0x3fffff)
dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
- dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
- dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
+ dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
+ PORT_PLL_INT_COEFF(int_coef) |
+ PORT_PLL_GAIN_CTL(gain_ctl);
- dpll_hw_state->pll8 = targ_cnt;
+ dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
- dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+ dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
- dpll_hw_state->pll10 =
- PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
- | PORT_PLL_DCO_AMP_OVR_EN_H;
+ dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
+ PORT_PLL_DCO_AMP_OVR_EN_H;
dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
- return true;
+ return 0;
}
-static bool
+static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct bxt_clk_div clk_div = {};
+ struct dpll clk_div = {};
bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
-static bool
+static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct bxt_clk_div clk_div = {};
+ struct dpll clk_div = {};
bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
@@ -2258,33 +2215,35 @@ static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
struct dpll clock;
clock.m1 = 2;
- clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
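The REG_FIELD_GET() conversions above replace open-coded mask-and-shift pairs; the shift is derived from the mask at compile time, FIELD_GET()-style. A simplified model of the idiom (assuming PORT_PLL_N_MASK is a contiguous GENMASK-style constant):

	#include <linux/bitfield.h>

	/* Same result as (pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT,
	 * without needing a separate *_SHIFT macro. */
	static u32 example_get_pll_n(u32 pll1)
	{
		return FIELD_GET(PORT_PLL_N_MASK, pll1);
	}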
-static bool bxt_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int bxt_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
+ int ret;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
- return false;
-
- if (intel_crtc_has_dp_encoder(crtc_state) &&
- !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
- return false;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ ret = bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ ret = bxt_ddi_dp_set_dpll_hw_state(crtc_state);
+ else
+ ret = -EINVAL;
+ if (ret)
+ return ret;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
@@ -2298,7 +2257,7 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
crtc_state->shared_dpll = pll;
- return true;
+ return 0;
}
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
@@ -2535,8 +2494,8 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
/* the following params are unused */
};
-static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *pll_params)
+static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *pll_params)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
@@ -2549,16 +2508,16 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
if (clock == params[i].clock) {
*pll_params = params[i].wrpll;
- return true;
+ return 0;
}
}
MISSING_CASE(clock);
- return false;
+ return -EINVAL;
}
-static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *pll_params)
+static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *pll_params)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -2590,7 +2549,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
}
}
- return true;
+ return 0;
}
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
@@ -2620,7 +2579,7 @@ static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
return ref_clock;
}
-static bool
+static int
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
@@ -2655,13 +2614,13 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
}
if (best_div == 0)
- return false;
+ return -EINVAL;
icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
pdiv, qdiv, kdiv);
- return true;
+ return 0;
}
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
@@ -2731,8 +2690,6 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
{
u32 dco_fraction = pll_params->dco_fraction;
- memset(pll_state, 0, sizeof(*pll_state));
-
if (ehl_combo_pll_div_frac_wa_needed(i915))
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
@@ -2753,13 +2710,13 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
}
-static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
- u32 *target_dco_khz,
- struct intel_dpll_hw_state *state,
- bool is_dkl)
+static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
+ u32 *target_dco_khz,
+ struct intel_dpll_hw_state *state,
+ bool is_dkl)
{
+ static const u8 div1_vals[] = { 7, 5, 3, 2 };
u32 dco_min_freq, dco_max_freq;
- int div1_vals[] = {7, 5, 3, 2};
unsigned int i;
int div2;
@@ -2822,19 +2779,19 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
hsdiv |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
- return true;
+ return 0;
}
}
- return false;
+ return -EINVAL;
}
/*
* The specification for this function uses real numbers, so the math had to be
* adapted to integer-only calculation, that's why it looks so different.
*/
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
- struct intel_dpll_hw_state *pll_state)
+static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+ struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int refclk_khz = dev_priv->dpll.ref_clks.nssc;
@@ -2848,14 +2805,14 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
+ int ret;
- memset(pll_state, 0, sizeof(*pll_state));
-
- if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state, is_dkl)) {
+ ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
+ pll_state, is_dkl);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Failed to find divisors for clock %d\n", clock);
- return false;
+ return ret;
}
m1div = 2;
@@ -2870,7 +2827,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
drm_dbg_kms(&dev_priv->drm,
"Failed to find mdiv for clock %d\n",
clock);
- return false;
+ return -EINVAL;
}
}
m2div_rem = dco_khz % (refclk_khz * m1div);
@@ -2897,7 +2854,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
break;
default:
MISSING_CASE(refclk_khz);
- return false;
+ return -EINVAL;
}
/*
@@ -3040,7 +2997,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
}
- return true;
+ return 0;
}
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
@@ -3162,9 +3119,9 @@ static u32 intel_get_hti_plls(struct drm_i915_private *i915)
return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
}
-static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -3182,11 +3139,10 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
- if (!ret) {
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate combo PHY PLL state.\n");
-
- return false;
+ return ret;
}
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
@@ -3231,7 +3187,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
drm_dbg_kms(&dev_priv->drm,
"No combo PHY PLL found for [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name);
- return false;
+ return -EINVAL;
}
intel_reference_shared_dpll(state, crtc,
@@ -3239,12 +3195,12 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
icl_update_active_dpll(state, crtc, encoder);
- return true;
+ return 0;
}
-static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
@@ -3252,12 +3208,14 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
struct skl_wrpll_params pll_params = { };
struct icl_port_dpll *port_dpll;
enum intel_dpll_id dpll_id;
+ int ret;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
+ ret = icl_calc_tbt_pll(crtc_state, &pll_params);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate TBT PLL state.\n");
- return false;
+ return ret;
}
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
@@ -3267,14 +3225,15 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll) {
drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
- return false;
+ return -EINVAL;
}
intel_reference_shared_dpll(state, crtc,
port_dpll->pll, &port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
- if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
+ ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate MG PHY PLL state.\n");
goto err_unreference_tbt_pll;
@@ -3286,6 +3245,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
&port_dpll->hw_state,
BIT(dpll_id));
if (!port_dpll->pll) {
+ ret = -EINVAL;
drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
}
@@ -3294,18 +3254,18 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
icl_update_active_dpll(state, crtc, encoder);
- return true;
+ return 0;
err_unreference_tbt_pll:
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
- return false;
+ return ret;
}
-static bool icl_get_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_get_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
@@ -3317,7 +3277,7 @@ static bool icl_get_dplls(struct intel_atomic_state *state,
MISSING_CASE(phy);
- return false;
+ return -EINVAL;
}
static void icl_put_dplls(struct intel_atomic_state *state,
@@ -4103,13 +4063,12 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
/**
* intel_shared_dpll_init - Initialize shared DPLLs
- * @dev: drm device
+ * @dev_priv: i915 device
*
- * Initialize shared DPLLs for @dev.
+ * Initialize shared DPLLs for @dev_priv.
*/
-void intel_shared_dpll_init(struct drm_device *dev)
+void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
@@ -4148,7 +4107,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
- drm_WARN_ON(dev, i != dpll_info[i].id);
+ drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
}
@@ -4176,17 +4135,18 @@ void intel_shared_dpll_init(struct drm_device *dev)
* intel_release_shared_dplls().
*
* Returns:
- * True if all required DPLLs were successfully reserved.
+ * 0 if all required DPLLs were successfully reserved,
+ * negative error code otherwise.
*/
-bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+int intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
- return false;
+ return -EINVAL;
return dpll_mgr->get_dplls(state, crtc, encoder);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index ba2fdfce1579..f7c96a1f13c8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -37,7 +37,6 @@
__a > __b ? (__a - __b) : (__b - __a); })
enum tc_port;
-struct drm_device;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
@@ -337,9 +336,9 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
+int intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
@@ -356,7 +355,7 @@ bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
-void intel_shared_dpll_init(struct drm_device *dev);
+void intel_shared_dpll_init(struct drm_i915_private *dev_priv);
void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv);
void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 05dd7dba3a5c..fb0e7e79e0cd 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -249,7 +249,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
if (HAS_LMEM(i915))
- dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
+ dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
else
dpt_obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(dpt_obj))
@@ -300,5 +300,5 @@ void intel_dpt_destroy(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- i915_vm_close(&dpt->vm);
+ i915_vm_put(&dpt->vm);
}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index fa715b8ea310..166caf293f7b 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -47,74 +47,36 @@
* requested by userspace.
*/
-static bool can_enable_drrs(struct intel_connector *connector,
- const struct intel_crtc_state *pipe_config)
+const char *intel_drrs_type_str(enum drrs_type drrs_type)
{
- const struct drm_i915_private *i915 = to_i915(connector->base.dev);
-
- if (pipe_config->vrr.enable)
- return false;
-
- /*
- * DRRS and PSR can't be enable together, so giving preference to PSR
- * as it allows more power-savings by complete shutting down display,
- * so to guarantee this, intel_drrs_compute_config() must be called
- * after intel_psr_compute_config().
- */
- if (pipe_config->has_psr)
- return false;
-
- return connector->panel.downclock_mode &&
- i915->drrs.type == SEAMLESS_DRRS_SUPPORT;
-}
-
-void
-intel_drrs_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- int output_bpp, bool constant_n)
-{
- struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- int pixel_clock;
-
- if (!can_enable_drrs(connector, pipe_config)) {
- if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
- intel_zero_m_n(&pipe_config->dp_m2_n2);
- return;
- }
-
- pipe_config->has_drrs = true;
-
- pixel_clock = connector->panel.downclock_mode->clock;
- if (pipe_config->splitter.enable)
- pixel_clock /= pipe_config->splitter.link_count;
+ static const char * const str[] = {
+ [DRRS_TYPE_NONE] = "none",
+ [DRRS_TYPE_STATIC] = "static",
+ [DRRS_TYPE_SEAMLESS] = "seamless",
+ };
- intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
- pipe_config->port_clock, &pipe_config->dp_m2_n2,
- constant_n, pipe_config->fec_enable);
+ if (drrs_type >= ARRAY_SIZE(str))
+ return "<invalid>";
- /* FIXME: abstract this better */
- if (pipe_config->splitter.enable)
- pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
+ return str[drrs_type];
}
static void
-intel_drrs_set_refresh_rate_pipeconf(const struct intel_crtc_state *crtc_state,
- enum drrs_refresh_rate_type refresh_type)
+intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
+ enum drrs_refresh_rate refresh_rate)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
u32 val, bit;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- bit = PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ bit = PIPECONF_REFRESH_RATE_ALT_VLV;
else
- bit = PIPECONF_EDP_RR_MODE_SWITCH;
+ bit = PIPECONF_REFRESH_RATE_ALT_ILK;
val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
- if (refresh_type == DRRS_LOW_RR)
+ if (refresh_rate == DRRS_REFRESH_RATE_LOW)
val |= bit;
else
val &= ~bit;
@@ -123,244 +85,171 @@ intel_drrs_set_refresh_rate_pipeconf(const struct intel_crtc_state *crtc_state,
}
static void
-intel_drrs_set_refresh_rate_m_n(const struct intel_crtc_state *crtc_state,
- enum drrs_refresh_rate_type refresh_type)
+intel_drrs_set_refresh_rate_m_n(struct intel_crtc *crtc,
+ enum drrs_refresh_rate refresh_rate)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- intel_cpu_transcoder_set_m1_n1(crtc, crtc_state->cpu_transcoder,
- refresh_type == DRRS_LOW_RR ?
- &crtc_state->dp_m2_n2 : &crtc_state->dp_m_n);
+ intel_cpu_transcoder_set_m1_n1(crtc, crtc->drrs.cpu_transcoder,
+ refresh_rate == DRRS_REFRESH_RATE_LOW ?
+ &crtc->drrs.m2_n2 : &crtc->drrs.m_n);
}
-static void intel_drrs_set_state(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- enum drrs_refresh_rate_type refresh_type)
+bool intel_drrs_is_active(struct intel_crtc *crtc)
{
- struct intel_dp *intel_dp = dev_priv->drrs.dp;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_display_mode *mode;
-
- if (!intel_dp) {
- drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
- return;
- }
-
- if (!crtc) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS: intel_crtc not initialized\n");
- return;
- }
-
- if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
- drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
- return;
- }
+ return crtc->drrs.cpu_transcoder != INVALID_TRANSCODER;
+}
- if (refresh_type == dev_priv->drrs.refresh_rate_type)
- return;
+static void intel_drrs_set_state(struct intel_crtc *crtc,
+ enum drrs_refresh_rate refresh_rate)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!crtc_state->hw.active) {
- drm_dbg_kms(&dev_priv->drm,
- "eDP encoder disabled. CRTC not Active\n");
+ if (refresh_rate == crtc->drrs.refresh_rate)
return;
- }
- if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv))
- intel_drrs_set_refresh_rate_m_n(crtc_state, refresh_type);
- else if (DISPLAY_VER(dev_priv) > 6)
- intel_drrs_set_refresh_rate_pipeconf(crtc_state, refresh_type);
+ if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
+ intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
+ else
+ intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);
- dev_priv->drrs.refresh_rate_type = refresh_type;
+ crtc->drrs.refresh_rate = refresh_rate;
+}
- if (refresh_type == DRRS_LOW_RR)
- mode = intel_dp->attached_connector->panel.downclock_mode;
- else
- mode = intel_dp->attached_connector->panel.fixed_mode;
- drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
- drm_mode_vrefresh(mode));
+static void intel_drrs_schedule_work(struct intel_crtc *crtc)
+{
+ mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
}
-static void
-intel_drrs_enable_locked(struct intel_dp *intel_dp)
+static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ unsigned int frontbuffer_bits;
+
+ frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
- dev_priv->drrs.busy_frontbuffer_bits = 0;
- dev_priv->drrs.dp = intel_dp;
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
+ crtc_state->bigjoiner_pipes)
+ frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
+
+ return frontbuffer_bits;
}
/**
- * intel_drrs_enable - init drrs struct if supported
- * @intel_dp: DP struct
- * @crtc_state: A pointer to the active crtc state.
+ * intel_drrs_activate - activate DRRS
+ * @crtc_state: the crtc state
*
- * Initializes frontbuffer_bits and drrs.dp
+ * Activates DRRS on the crtc.
*/
-void intel_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+void intel_drrs_activate(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (!crtc_state->has_drrs)
return;
- drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
+ if (!crtc_state->hw.active)
+ return;
- mutex_lock(&dev_priv->drrs.mutex);
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ return;
- if (dev_priv->drrs.dp) {
- drm_warn(&dev_priv->drm, "DRRS already enabled\n");
- goto unlock;
- }
+ mutex_lock(&crtc->drrs.mutex);
- intel_drrs_enable_locked(intel_dp);
+ crtc->drrs.cpu_transcoder = crtc_state->cpu_transcoder;
+ crtc->drrs.m_n = crtc_state->dp_m_n;
+ crtc->drrs.m2_n2 = crtc_state->dp_m2_n2;
+ crtc->drrs.frontbuffer_bits = intel_drrs_frontbuffer_bits(crtc_state);
+ crtc->drrs.busy_frontbuffer_bits = 0;
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
-}
+ intel_drrs_schedule_work(crtc);
-static void
-intel_drrs_disable_locked(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- intel_drrs_set_state(dev_priv, crtc_state, DRRS_HIGH_RR);
- dev_priv->drrs.dp = NULL;
+ mutex_unlock(&crtc->drrs.mutex);
}
/**
- * intel_drrs_disable - Disable DRRS
- * @intel_dp: DP struct
- * @old_crtc_state: Pointer to old crtc_state.
+ * intel_drrs_deactivate - deactivate DRRS
+ * @old_crtc_state: the old crtc state
*
+ * Deactivates DRRS on the crtc.
*/
-void intel_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
+void intel_drrs_deactivate(const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
if (!old_crtc_state->has_drrs)
return;
- mutex_lock(&dev_priv->drrs.mutex);
- if (!dev_priv->drrs.dp) {
- mutex_unlock(&dev_priv->drrs.mutex);
+ if (!old_crtc_state->hw.active)
return;
- }
-
- intel_drrs_disable_locked(intel_dp, old_crtc_state);
- mutex_unlock(&dev_priv->drrs.mutex);
-
- cancel_delayed_work_sync(&dev_priv->drrs.work);
-}
-
-/**
- * intel_drrs_update - Update DRRS state
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- *
- * This function will update DRRS states, disabling or enabling DRRS when
- * executing fastsets. For full modeset, intel_drrs_disable() and
- * intel_drrs_enable() should be called instead.
- */
-void
-intel_drrs_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+ if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
return;
- mutex_lock(&dev_priv->drrs.mutex);
+ mutex_lock(&crtc->drrs.mutex);
- /* New state matches current one? */
- if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
- goto unlock;
+ if (intel_drrs_is_active(crtc))
+ intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);
- if (crtc_state->has_drrs)
- intel_drrs_enable_locked(intel_dp);
- else
- intel_drrs_disable_locked(intel_dp, crtc_state);
+ crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
+ crtc->drrs.frontbuffer_bits = 0;
+ crtc->drrs.busy_frontbuffer_bits = 0;
+
+ mutex_unlock(&crtc->drrs.mutex);
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
+ cancel_delayed_work_sync(&crtc->drrs.work);
}
static void intel_drrs_downclock_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), drrs.work.work);
- struct intel_dp *intel_dp;
- struct drm_crtc *crtc;
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- intel_dp = dev_priv->drrs.dp;
+ struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work);
- if (!intel_dp)
- goto unlock;
+ mutex_lock(&crtc->drrs.mutex);
- /*
- * The delayed work can race with an invalidate hence we need to
- * recheck.
- */
+ if (intel_drrs_is_active(crtc) && !crtc->drrs.busy_frontbuffer_bits)
+ intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_LOW);
- if (dev_priv->drrs.busy_frontbuffer_bits)
- goto unlock;
-
- crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- intel_drrs_set_state(dev_priv, to_intel_crtc(crtc)->config, DRRS_LOW_RR);
-
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
+ mutex_unlock(&crtc->drrs.mutex);
}
static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits,
+ unsigned int all_frontbuffer_bits,
bool invalidate)
{
- struct intel_dp *intel_dp;
- struct drm_crtc *crtc;
- enum pipe pipe;
+ struct intel_crtc *crtc;
- if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
+ if (dev_priv->vbt.drrs_type != DRRS_TYPE_SEAMLESS)
return;
- cancel_delayed_work(&dev_priv->drrs.work);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ unsigned int frontbuffer_bits;
- mutex_lock(&dev_priv->drrs.mutex);
+ mutex_lock(&crtc->drrs.mutex);
- intel_dp = dev_priv->drrs.dp;
- if (!intel_dp) {
- mutex_unlock(&dev_priv->drrs.mutex);
- return;
- }
+ frontbuffer_bits = all_frontbuffer_bits & crtc->drrs.frontbuffer_bits;
+ if (!frontbuffer_bits) {
+ mutex_unlock(&crtc->drrs.mutex);
+ continue;
+ }
- crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
+ if (invalidate)
+ crtc->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+ else
+ crtc->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
- if (invalidate)
- dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
- else
- dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
- /* flush/invalidate means busy screen hence upclock */
- if (frontbuffer_bits)
- intel_drrs_set_state(dev_priv, to_intel_crtc(crtc)->config,
- DRRS_HIGH_RR);
-
- /*
- * flush also means no more activity hence schedule downclock, if all
- * other fbs are quiescent too
- */
- if (!invalidate && !dev_priv->drrs.busy_frontbuffer_bits)
- schedule_delayed_work(&dev_priv->drrs.work,
- msecs_to_jiffies(1000));
- mutex_unlock(&dev_priv->drrs.mutex);
+ /* flush/invalidate means busy screen hence upclock */
+ intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);
+
+ /*
+ * flush also means no more activity hence schedule downclock, if all
+ * other fbs are quiescent too
+ */
+ if (!crtc->drrs.busy_frontbuffer_bits)
+ intel_drrs_schedule_work(crtc);
+ else
+ cancel_delayed_work(&crtc->drrs.work);
+
+ mutex_unlock(&crtc->drrs.mutex);
+ }
}
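The loop above moves frontbuffer tracking from a single global intel_dp to per-crtc state: the global bits are masked by each crtc's own tracking mask, invalidates mark the screen busy (forcing the high refresh rate), and a flush that clears the last busy bit arms the downclock timer. A standalone sketch of that bookkeeping (hypothetical types, detached from the driver structs):

	/* Hypothetical model of the per-crtc busy-bit bookkeeping. */
	static bool example_may_downclock(unsigned int all_bits,
					  unsigned int crtc_mask,
					  unsigned int *busy_bits,
					  bool invalidate)
	{
		unsigned int bits = all_bits & crtc_mask;

		if (!bits)
			return false; /* event does not touch this crtc */

		if (invalidate)
			*busy_bits |= bits;  /* busy screen: stay upclocked */
		else
			*busy_bits &= ~bits; /* flush: this activity is done */

		return !invalidate && !*busy_bits; /* idle: may downclock */
	}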
/**
@@ -397,68 +286,17 @@ void intel_drrs_flush(struct drm_i915_private *dev_priv,
intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
}
-void intel_drrs_page_flip(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- unsigned int frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
-
- intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
-}
-
/**
- * intel_drrs_init - Init basic DRRS work and mutex.
- * @connector: eDP connector
- * @fixed_mode: preferred mode of panel
+ * intel_crtc_drrs_init - Init DRRS for CRTC
+ * @crtc: crtc
*
- * This function is called only once at driver load to initialize basic
+ * This function is called only once at driver load to initialize basic
* DRRS stuff.
*
- * Returns:
- * Downclock mode if panel supports it, else return NULL.
- * DRRS support is determined by the presence of downclock mode (apart
- * from VBT setting).
*/
-struct drm_display_mode *
-intel_drrs_init(struct intel_connector *connector,
- struct drm_display_mode *fixed_mode)
+void intel_crtc_drrs_init(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_encoder *encoder = connector->encoder;
- struct drm_display_mode *downclock_mode = NULL;
-
- INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
- mutex_init(&dev_priv->drrs.mutex);
-
- if (DISPLAY_VER(dev_priv) <= 6) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS supported for Gen7 and above\n");
- return NULL;
- }
-
- if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
- encoder->port != PORT_A) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS only supported on eDP port A\n");
- return NULL;
- }
-
- if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
- drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
- return NULL;
- }
-
- downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
- if (!downclock_mode) {
- drm_dbg_kms(&dev_priv->drm,
- "Downclock mode is not found. DRRS not supported\n");
- return NULL;
- }
-
- dev_priv->drrs.type = dev_priv->vbt.drrs_type;
-
- dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
- drm_dbg_kms(&dev_priv->drm,
- "seamless DRRS supported for eDP panel.\n");
- return downclock_mode;
+ INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
+ mutex_init(&crtc->drrs.mutex);
+ crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
index 9ec9c447211a..3ad1be1ad9c1 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.h
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -8,29 +8,21 @@
#include <linux/types.h>
+enum drrs_type;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_connector;
-struct intel_dp;
-void intel_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_drrs_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+const char *intel_drrs_type_str(enum drrs_type drrs_type);
+bool intel_drrs_is_active(struct intel_crtc *crtc);
+void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
+void intel_drrs_deactivate(const struct intel_crtc_state *crtc_state);
void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
-void intel_drrs_page_flip(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void intel_drrs_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- int output_bpp, bool constant_n);
-struct drm_display_mode *intel_drrs_init(struct intel_connector *connector,
- struct drm_display_mode *fixed_mode);
+void intel_crtc_drrs_init(struct intel_crtc *crtc);
#endif /* __INTEL_DRRS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index b34a67309976..c4affcb216fd 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -283,14 +283,12 @@ void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
- drm_err(&i915->drm, "Gem object creation failed\n");
kfree(dsb);
goto out;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
- drm_err(&i915->drm, "Vma creation failed\n");
i915_gem_object_put(obj);
kfree(dsb);
goto out;
@@ -298,7 +296,6 @@ void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
- drm_err(&i915->drm, "Command buffer creation failed\n");
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
kfree(dsb);
goto out;
@@ -311,6 +308,10 @@ void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
dsb->ins_start_offset = 0;
crtc_state->dsb = dsb;
out:
+ if (!crtc_state->dsb)
+ drm_info(&i915->drm,
+ "DSB queue setup failed, will fallback to MMIO for display HW programming\n");
+
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
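
Editor's note: the intel_dsb_prepare() change above drops three per-step error messages in favor of a single message at the common exit, which covers every failure mode and states the consequence (MMIO fallback) rather than the mechanism. A hedged sketch of that consolidated error-path shape in generic C (not the driver code):

/* Sketch of the single-message-at-exit error-path pattern (illustrative). */
#include <stdio.h>
#include <stdlib.h>

struct queue { char *buf; };

static struct queue *queue_prepare(size_t size)
{
	struct queue *q = malloc(sizeof(*q));

	if (!q)
		goto out;

	q->buf = malloc(size);
	if (!q->buf) {
		free(q);
		q = NULL;
		goto out;
	}
out:
	/* One message covers every failure mode; callers just see NULL. */
	if (!q)
		fprintf(stderr, "queue setup failed, falling back to slow path\n");
	return q;
}

int main(void)
{
	struct queue *q = queue_prepare(4096);

	if (q) {
		free(q->buf);
		free(q);
	}
	return 0;
}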
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index a50422e03a7e..389a8c24cdc1 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -34,26 +34,7 @@ int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
int intel_dsi_get_modes(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *mode;
-
- drm_dbg_kms(&i915->drm, "\n");
-
- if (!intel_connector->panel.fixed_mode) {
- drm_dbg_kms(&i915->drm, "no fixed mode\n");
- return 0;
- }
-
- mode = drm_mode_duplicate(connector->dev,
- intel_connector->panel.fixed_mode);
- if (!mode) {
- drm_dbg_kms(&i915->drm, "drm_mode_duplicate failed\n");
- return 0;
- }
-
- drm_mode_probed_add(connector, mode);
- return 1;
+ return intel_panel_get_modes(to_intel_connector(connector));
}
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
@@ -61,7 +42,8 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_connector, mode);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
enum drm_mode_status status;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 6b4a27372c82..dd24aef925f2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -30,6 +30,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <asm/unaligned.h>
@@ -124,9 +125,25 @@ struct i2c_adapter_lookup {
#define ICL_GPIO_DDPA_CTRLCLK_2 8
#define ICL_GPIO_DDPA_CTRLDATA_2 9
-static enum port intel_dsi_seq_port_to_port(u8 port)
+static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
+ u8 seq_port)
{
- return port ? PORT_C : PORT_A;
+ /*
+ * If single link DSI is being used on any port, the VBT sequence block
+ * send packet apparently always has 0 for the port. Just use the port
+ * we have configured, and ignore the sequence block port.
+ */
+ if (hweight8(intel_dsi->ports) == 1)
+ return ffs(intel_dsi->ports) - 1;
+
+ if (seq_port) {
+ if (intel_dsi->ports & BIT(PORT_B))
+ return PORT_B;
+ else if (intel_dsi->ports & BIT(PORT_C))
+ return PORT_C;
+ }
+
+ return PORT_A;
}
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
@@ -148,15 +165,10 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
- /* For DSI single link on Port A & C, the seq_port value which is
- * parsed from Sequence Block#53 of VBT has been set to 0
- * Now, read/write of packets for the DSI single link on Port A and
- * Port C will based on the DVO port from VBT block 2.
- */
- if (intel_dsi->ports == (1 << PORT_C))
- port = PORT_C;
- else
- port = intel_dsi_seq_port_to_port(seq_port);
+ port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
+
+ if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ goto out;
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
@@ -686,9 +698,9 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
intel_dsi->burst_mode_ratio);
drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
drm_dbg_kms(&i915->drm, "Eot %s\n",
- enableddisabled(intel_dsi->eotp_pkt));
+ str_enabled_disabled(intel_dsi->eotp_pkt));
drm_dbg_kms(&i915->drm, "Clockstop %s\n",
- enableddisabled(!intel_dsi->clock_stop));
+ str_enabled_disabled(!intel_dsi->clock_stop));
drm_dbg_kms(&i915->drm, "Mode %s\n",
intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
@@ -715,7 +727,7 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
intel_dsi->clk_hs_to_lp_count);
drm_dbg_kms(&i915->drm, "BTA %s\n",
- enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
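
Editor's note: the new intel_dsi_seq_port_to_port() above keys off the population count of the configured port mask — with exactly one bit set, the lowest set bit wins and the VBT's seq_port is ignored entirely. A standalone sketch of that bit trick using compiler builtins in place of the kernel's hweight8()/ffs(); the toy port numbering is illustrative, not the i915 enums:

/* Toy version of picking a port from a single-bit mask (illustrative). */
#include <stdio.h>

enum toy_port { PORT_A, PORT_B, PORT_C };

static enum toy_port mask_to_port(unsigned int ports, unsigned int seq_port)
{
	/* Single link: exactly one bit set, use it and ignore seq_port. */
	if (__builtin_popcount(ports) == 1)
		return __builtin_ffs(ports) - 1; /* ffs is 1-based */

	/* Dual link: a non-zero seq_port selects the second configured port. */
	if (seq_port) {
		if (ports & (1 << PORT_B))
			return PORT_B;
		if (ports & (1 << PORT_C))
			return PORT_C;
	}
	return PORT_A;
}

int main(void)
{
	printf("%d\n", mask_to_port(1 << PORT_C, 0));                  /* -> 2 */
	printf("%d\n", mask_to_port((1 << PORT_A) | (1 << PORT_C), 1)); /* -> 2 */
	return 0;
}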
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 2eeb209afc64..5572e43026e4 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -226,7 +226,7 @@ intel_dvo_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector);
const struct drm_display_mode *fixed_mode =
- intel_connector->panel.fixed_mode;
+ intel_panel_fixed_mode(intel_connector, mode);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
@@ -257,9 +257,9 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- const struct drm_display_mode *fixed_mode =
- intel_dvo->attached_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_dvo->attached_connector, adjusted_mode);
/*
* If we have timings from the BIOS for the panel, put them in
@@ -333,8 +333,6 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- const struct drm_display_mode *fixed_mode =
- to_intel_connector(connector)->panel.fixed_mode;
int num_modes;
/*
@@ -348,17 +346,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
if (num_modes)
return num_modes;
- if (fixed_mode) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, fixed_mode);
- if (mode) {
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
- }
-
- return num_modes;
+ return intel_panel_get_modes(to_intel_connector(connector));
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
@@ -390,27 +378,6 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
-/*
- * Attempts to get a fixed panel timing for LVDS (currently only the i830).
- *
- * Other chips with DVO LVDS will need to extend this to deal with the LVDS
- * chip being on DVOB/C and having multiple pipes.
- */
-static struct drm_display_mode *
-intel_dvo_get_current_mode(struct intel_encoder *encoder)
-{
- struct drm_display_mode *mode;
-
- mode = intel_encoder_current_mode(encoder);
- if (mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
- drm_mode_debug_printmodeline(mode);
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- }
-
- return mode;
-}
-
static enum port intel_dvo_port(i915_reg_t dvo_reg)
{
if (i915_mmio_reg_equal(dvo_reg, DVOA))
@@ -561,9 +528,11 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
- intel_panel_init(&intel_connector->panel,
- intel_dvo_get_current_mode(intel_encoder),
- NULL);
+ intel_panel_add_encoder_fixed_mode(intel_connector,
+ intel_encoder);
+
+ intel_panel_init(intel_connector);
+
intel_dvo->panel_wants_dither = true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 23cfe2e5ce2a..9f5a6b79e95b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -107,6 +107,21 @@ static const struct drm_format_info gen12_ccs_cc_formats[] = {
.hsub = 1, .vsub = 1, .has_alpha = true },
};
+static const struct drm_format_info gen12_flat_ccs_cc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
+
struct intel_modifier_desc {
u64 modifier;
struct {
@@ -135,11 +150,32 @@ struct intel_modifier_desc {
INTEL_PLANE_CAP_CCS_MC)
#define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \
INTEL_PLANE_CAP_TILING_Y | \
- INTEL_PLANE_CAP_TILING_Yf)
+ INTEL_PLANE_CAP_TILING_Yf | \
+ INTEL_PLANE_CAP_TILING_4)
#define INTEL_PLANE_CAP_TILING_NONE 0
static const struct intel_modifier_desc intel_modifiers[] = {
{
+ .modifier = I915_FORMAT_MOD_4_TILED_DG2_MC_CCS,
+ .display_ver = { 13, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC,
+ }, {
+ .modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC,
+ .display_ver = { 13, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC_CC,
+
+ .ccs.cc_planes = BIT(1),
+
+ FORMAT_OVERRIDE(gen12_flat_ccs_cc_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS,
+ .display_ver = { 13, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC,
+ }, {
+ .modifier = I915_FORMAT_MOD_4_TILED,
+ .display_ver = { 13, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4,
+ }, {
.modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
.display_ver = { 12, 13 },
.plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC,
@@ -380,17 +416,13 @@ bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier)
static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md,
const struct drm_format_info *info)
{
- int yuv_planes;
-
if (!info->is_yuv)
return false;
- if (plane_caps_contain_any(md->plane_caps, INTEL_PLANE_CAP_CCS_MASK))
- yuv_planes = 4;
+ if (hweight8(md->ccs.planar_aux_planes) == 2)
+ return info->num_planes == 4;
else
- yuv_planes = 2;
-
- return info->num_planes == yuv_planes;
+ return info->num_planes == 2;
}
/**
@@ -515,12 +547,13 @@ static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_p
int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
+ const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
struct drm_i915_private *i915 = to_i915(fb->dev);
- if (intel_fb_is_ccs_modifier(fb->modifier))
+ if (md->ccs.packed_aux_planes | md->ccs.planar_aux_planes)
return main_to_ccs_plane(fb, main_plane);
else if (DISPLAY_VER(i915) < 11 &&
- intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ format_is_yuv_semiplanar(md, fb->format))
return 1;
else
return 0;
@@ -545,6 +578,15 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
else
return 512;
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED:
+ /*
+ * Each 4K tile consists of 64B (8*8) subtiles, with the
+ * same shape as a Y tile (i.e. 4*16B OWords)
+ */
+ return 128;
case I915_FORMAT_MOD_Y_TILED_CCS:
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 128;
@@ -650,6 +692,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
return I915_TILING_Y;
case INTEL_PLANE_CAP_TILING_X:
return I915_TILING_X;
+ case INTEL_PLANE_CAP_TILING_4:
case INTEL_PLANE_CAP_TILING_Yf:
case INTEL_PLANE_CAP_TILING_NONE:
return I915_TILING_NONE;
@@ -737,8 +780,13 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_4_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ return 16 * 1024;
default:
MISSING_CASE(fb->modifier);
return 0;
@@ -1981,7 +2029,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
/* object is backed with LMEM for discrete */
i915 = to_i915(obj->base.dev);
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
return ERR_PTR(-EREMOTE);
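
Editor's note: the Tile 4 geometry added above shares the Y tile's 128-byte row width, so the numbers in the comment can be verified with a little arithmetic: a 4 KiB tile that is 128 bytes wide must be 32 rows tall and holds 64 subtiles of 64 bytes each. A quick standalone check (plain C, not driver code):

/* Sanity-check the Tile 4 geometry described in the comment above. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const int tile_size = 4096;   /* bytes per tile */
	const int tile_width = 128;   /* bytes per row, same as Y tile */
	const int subtile_size = 64;  /* bytes per 8*8 subtile */

	int tile_height = tile_size / tile_width; /* rows per tile */
	int subtiles = tile_size / subtile_size;  /* subtiles per tile */

	assert(tile_height == 32);
	assert(subtiles == 64);
	printf("Tile 4: %dB wide x %d rows, %d subtiles of %dB\n",
	       tile_width, tile_height, subtiles, subtile_size);
	return 0;
}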
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index ba9df8986c1e..12386f13a4e0 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -27,6 +27,7 @@ struct intel_plane_state;
#define INTEL_PLANE_CAP_TILING_X BIT(3)
#define INTEL_PLANE_CAP_TILING_Y BIT(4)
#define INTEL_PLANE_CAP_TILING_Yf BIT(5)
+#define INTEL_PLANE_CAP_TILING_4 BIT(6)
bool intel_fb_is_ccs_modifier(u64 modifier);
bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index a307b4993bcf..bd6e7c98e751 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -140,7 +140,7 @@ retry:
if (!ret && phys_cursor)
ret = i915_gem_object_attach_phys(obj, alignment);
else if (!ret && HAS_LMEM(dev_priv))
- ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
+ ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
/* TODO: Do we need to sync when migration becomes async? */
if (!ret)
ret = i915_gem_object_pin_pages(obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 3e61a8936245..bbdc34a23d54 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -38,9 +38,12 @@
* forcibly disable it to allow proper screen updates.
*/
+#include <linux/string_helpers.h>
+
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_cdclk.h"
#include "intel_de.h"
@@ -87,7 +90,6 @@ struct intel_fbc {
* with stolen_lock.
*/
struct mutex lock;
- unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
struct drm_mm_node compressed_fb;
@@ -665,6 +667,10 @@ static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
static void intel_fbc_nuke(struct intel_fbc *fbc)
{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ drm_WARN_ON(&i915->drm, fbc->flip_pending);
+
trace_intel_fbc_nuke(fbc->state.plane);
fbc->funcs->nuke(fbc);
@@ -805,6 +811,14 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
fbc->funcs->program_cfb(fbc);
}
+static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
+{
+ /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,dg2,adlp */
+ if (DISPLAY_VER(fbc->i915) >= 11)
+ intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
+ DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+}
+
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
@@ -946,6 +960,7 @@ static bool tiling_is_valid(const struct intel_plane_state *plane_state)
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return DISPLAY_VER(i915) >= 9;
+ case I915_FORMAT_MOD_4_TILED:
case I915_FORMAT_MOD_X_TILED:
return true;
default:
@@ -966,6 +981,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
struct intel_fbc_state *fbc_state = &fbc->state;
WARN_ON(plane_state->no_fbc_reason);
+ WARN_ON(fbc_state->plane && fbc_state->plane != plane);
fbc_state->plane = plane;
@@ -1078,7 +1094,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
*/
if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
plane_state->no_fbc_reason = "PSR2 enabled";
- return false;
+ return 0;
}
if (!pixel_format_is_valid(plane_state)) {
@@ -1104,7 +1120,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
fb->format->has_alpha) {
plane_state->no_fbc_reason = "per-pixel alpha not supported";
- return false;
+ return 0;
}
if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
@@ -1120,7 +1136,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (DISPLAY_VER(i915) >= 9 &&
plane_state->view.color_plane[0].y & 3) {
plane_state->no_fbc_reason = "plane start Y offset misaligned";
- return false;
+ return 0;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
@@ -1128,7 +1144,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
(plane_state->view.color_plane[0].y +
(drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
- return false;
+ return 0;
}
/* WaFbcExceedCdClockThreshold:hsw,bdw */
@@ -1270,6 +1286,8 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
__intel_fbc_cleanup_cfb(fbc);
fbc->state.plane = NULL;
+ fbc->flip_pending = false;
+ fbc->busy_bits = 0;
}
static void __intel_fbc_post_update(struct intel_fbc *fbc)
@@ -1313,7 +1331,7 @@ static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
if (fbc->state.plane)
return fbc->state.plane->frontbuffer_bit;
else
- return fbc->possible_framebuffer_bits;
+ return 0;
}
static void __intel_fbc_invalidate(struct intel_fbc *fbc,
@@ -1325,11 +1343,14 @@ static void __intel_fbc_invalidate(struct intel_fbc *fbc,
mutex_lock(&fbc->lock);
- fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
+ frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
+ if (!frontbuffer_bits)
+ goto out;
- if (fbc->state.plane && fbc->busy_bits)
- intel_fbc_deactivate(fbc, "frontbuffer write");
+ fbc->busy_bits |= frontbuffer_bits;
+ intel_fbc_deactivate(fbc, "frontbuffer write");
+out:
mutex_unlock(&fbc->lock);
}
@@ -1351,18 +1372,22 @@ static void __intel_fbc_flush(struct intel_fbc *fbc,
{
mutex_lock(&fbc->lock);
+ frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
+ if (!frontbuffer_bits)
+ goto out;
+
fbc->busy_bits &= ~frontbuffer_bits;
if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
goto out;
- if (!fbc->busy_bits && fbc->state.plane &&
- (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
- if (fbc->active)
- intel_fbc_nuke(fbc);
- else if (!fbc->flip_pending)
- __intel_fbc_post_update(fbc);
- }
+ if (fbc->busy_bits || fbc->flip_pending)
+ goto out;
+
+ if (fbc->active)
+ intel_fbc_nuke(fbc);
+ else
+ intel_fbc_activate(fbc);
out:
mutex_unlock(&fbc->lock);
@@ -1445,6 +1470,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
intel_fbc_update_state(state, crtc, plane);
+ intel_fbc_program_workarounds(fbc);
intel_fbc_program_cfb(fbc);
}
@@ -1500,25 +1526,6 @@ void intel_fbc_update(struct intel_atomic_state *state,
}
}
-/**
- * intel_fbc_global_disable - globally disable FBC
- * @i915: i915 device instance
- *
- * This function disables FBC regardless of which CRTC is associated with it.
- */
-void intel_fbc_global_disable(struct drm_i915_private *i915)
-{
- struct intel_fbc *fbc;
- enum intel_fbc_id fbc_id;
-
- for_each_intel_fbc(i915, fbc, fbc_id) {
- mutex_lock(&fbc->lock);
- if (fbc->state.plane)
- __intel_fbc_disable(fbc);
- mutex_unlock(&fbc->lock);
- }
-}
-
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
@@ -1640,7 +1647,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
{
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (intel_vtd_active(i915) &&
+ if (i915_vtd_active(i915) &&
(IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
drm_info(&i915->drm,
"Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
@@ -1652,11 +1659,7 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
{
- if (!fbc)
- return;
-
plane->fbc = fbc;
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
@@ -1709,22 +1712,26 @@ void intel_fbc_init(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
i915->params.enable_fbc);
- for_each_fbc_id(i915, fbc_id) {
- struct intel_fbc *fbc;
+ for_each_fbc_id(i915, fbc_id)
+ i915->fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
+}
- fbc = intel_fbc_create(i915, fbc_id);
- if (!fbc)
- continue;
+/**
+ * intel_fbc_sanitize - Sanitize FBC
+ * @i915: the i915 device
+ *
+ * Make sure FBC is initially disabled since we have no idea,
+ * e.g., which parts of stolen memory it might be scribbling
+ * into.
+ */
+void intel_fbc_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_fbc *fbc;
+ enum intel_fbc_id fbc_id;
- /*
- * We still don't have any sort of hardware state readout
- * for FBC, so deactivate it in case the BIOS activated it
- * to make sure software matches the hardware state.
- */
+ for_each_intel_fbc(i915, fbc, fbc_id) {
if (intel_fbc_hw_is_active(fbc))
intel_fbc_hw_deactivate(fbc);
-
- i915->fbc[fbc->id] = fbc;
}
}
@@ -1743,7 +1750,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
if (fbc->active) {
seq_puts(m, "FBC enabled\n");
seq_printf(m, "Compressing: %s\n",
- yesno(intel_fbc_is_compressing(fbc)));
+ str_yes_no(intel_fbc_is_compressing(fbc)));
} else {
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
}
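
Editor's note: the reworked __intel_fbc_invalidate()/__intel_fbc_flush() above first mask the incoming frontbuffer bits against the single bit FBC cares about and bail out early when nothing relevant remains, rather than testing the overlap later. A minimal sketch of that filter-then-act shape, with illustrative names:

/* Sketch of the mask-and-early-out pattern used in the FBC rework above. */
#include <stdio.h>

struct toy_fbc {
	unsigned int plane_bit;  /* 0 when no plane is assigned */
	unsigned int busy_bits;
};

static void toy_fbc_invalidate(struct toy_fbc *fbc, unsigned int bits)
{
	bits &= fbc->plane_bit;    /* only our plane's bit matters */
	if (!bits)
		return;            /* early out: nothing relevant touched */

	fbc->busy_bits |= bits;
	printf("deactivate: frontbuffer write (busy=%#x)\n", fbc->busy_bits);
}

int main(void)
{
	struct toy_fbc fbc = { .plane_bit = 0x4 };

	toy_fbc_invalidate(&fbc, 0x3); /* other planes: ignored */
	toy_fbc_invalidate(&fbc, 0x4); /* our plane: deactivates FBC */
	return 0;
}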
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 8c5a7339a27f..db60143295ec 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -30,10 +30,10 @@ void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_cleanup(struct drm_i915_private *dev_priv);
+void intel_fbc_sanitize(struct drm_i915_private *dev_priv);
void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 2cd62a187df3..221336178991 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -279,7 +279,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Our framebuffer is the entirety of fbdev's system memory */
info->fix.smem_start =
(unsigned long)(ggtt->gmadr.start + vma->node.start);
- info->fix.smem_len = vma->node.size;
+ info->fix.smem_len = vma->size;
}
vaddr = i915_vma_pin_iomap(vma);
@@ -290,7 +290,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
info->screen_base = vaddr;
- info->screen_size = vma->node.size;
+ info->screen_size = vma->size;
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 4e4b43669b14..67d2484afbaa 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -3,6 +3,8 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
@@ -34,7 +36,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
}
I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
+ str_on_off(state), str_on_off(cur_state));
}
void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
@@ -55,7 +57,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
+ str_on_off(state), str_on_off(cur_state));
}
void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
@@ -93,7 +95,7 @@ static void assert_fdi_rx_pll(struct drm_i915_private *i915,
cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
+ str_on_off(state), str_on_off(cur_state));
}
void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
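
Editor's note: the onoff()/yesno()/enableddisabled() conversions throughout this pull pick up the generic helpers from <linux/string_helpers.h>. Each is just a ternary over a bool; a self-contained userspace equivalent for reference:

/* Userspace stand-ins for the <linux/string_helpers.h> helpers (sketch). */
#include <stdbool.h>
#include <stdio.h>

static const char *str_on_off(bool v)  { return v ? "on" : "off"; }
static const char *str_yes_no(bool v)  { return v ? "yes" : "no"; }
static const char *str_enabled_disabled(bool v)
{
	return v ? "enabled" : "disabled";
}

int main(void)
{
	printf("state: expected %s, current %s\n",
	       str_on_off(true), str_on_off(false));
	printf("compressing: %s, eot: %s\n",
	       str_yes_no(true), str_enabled_disabled(false));
	return 0;
}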
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 2fad03250661..a6ba7fb72339 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -31,13 +31,23 @@
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
-#include <drm/drm_hdcp.h>
+#include <drm/display/drm_hdcp_helper.h>
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+#define GMBUS_FORCE_BIT_RETRY (1U << 31)
+ u32 force_bit;
+ u32 reg0;
+ i915_reg_t gpio_reg;
+ struct i2c_algo_bit_data bit_algo;
+ struct drm_i915_private *dev_priv;
+};
+
struct gmbus_pin {
const char *name;
enum i915_gpio gpio;
@@ -106,51 +116,47 @@ static const struct gmbus_pin gmbus_pins_dg2[] = {
[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
};
-/* pin is expected to be valid */
-static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
+static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
unsigned int pin)
{
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG2)
- return &gmbus_pins_dg2[pin];
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
- return &gmbus_pins_dg1[pin];
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- return &gmbus_pins_icp[pin];
- else if (HAS_PCH_CNP(dev_priv))
- return &gmbus_pins_cnp[pin];
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- return &gmbus_pins_bxt[pin];
- else if (DISPLAY_VER(dev_priv) == 9)
- return &gmbus_pins_skl[pin];
- else if (IS_BROADWELL(dev_priv))
- return &gmbus_pins_bdw[pin];
- else
- return &gmbus_pins[pin];
-}
-
-bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
- unsigned int pin)
-{
- unsigned int size;
+ const struct gmbus_pin *pins;
+ size_t size;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG2)
+ if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
+ pins = gmbus_pins_dg2;
size = ARRAY_SIZE(gmbus_pins_dg2);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
- else if (HAS_PCH_CNP(dev_priv))
+ } else if (HAS_PCH_CNP(i915)) {
+ pins = gmbus_pins_cnp;
size = ARRAY_SIZE(gmbus_pins_cnp);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ pins = gmbus_pins_bxt;
size = ARRAY_SIZE(gmbus_pins_bxt);
- else if (DISPLAY_VER(dev_priv) == 9)
+ } else if (DISPLAY_VER(i915) == 9) {
+ pins = gmbus_pins_skl;
size = ARRAY_SIZE(gmbus_pins_skl);
- else if (IS_BROADWELL(dev_priv))
+ } else if (IS_BROADWELL(i915)) {
+ pins = gmbus_pins_bdw;
size = ARRAY_SIZE(gmbus_pins_bdw);
- else
+ } else {
+ pins = gmbus_pins;
size = ARRAY_SIZE(gmbus_pins);
+ }
+
+ if (pin >= size || !pins[pin].name)
+ return NULL;
+
+ return &pins[pin];
+}
- return pin < size && get_gmbus_pin(dev_priv, pin)->name;
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *i915, unsigned int pin)
+{
+ return get_gmbus_pin(i915, pin);
}
/* Intel GPIO access functions */
@@ -294,9 +300,7 @@ static void set_data(void *data, int state_high)
static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
intel_gmbus_reset(dev_priv);
@@ -313,9 +317,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
static void
intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
set_data(bus, 1);
@@ -326,14 +328,13 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
}
static void
-intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
+intel_gpio_setup(struct intel_gmbus *bus, i915_reg_t gpio_reg)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
struct i2c_algo_bit_data *algo;
algo = &bus->bit_algo;
- bus->gpio_reg = GPIO(get_gmbus_pin(dev_priv, pin)->gpio);
+ bus->gpio_reg = gpio_reg;
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
@@ -614,9 +615,7 @@ static int
do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
u32 gmbus0_source)
{
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int i = 0, inc, try = 0;
int ret = 0;
@@ -746,8 +745,7 @@ out:
static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
- struct intel_gmbus *bus =
- container_of(adapter, struct intel_gmbus, adapter);
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
intel_wakeref_t wakeref;
int ret;
@@ -771,8 +769,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
- struct intel_gmbus *bus =
- container_of(adapter, struct intel_gmbus, adapter);
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
@@ -863,7 +860,6 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
int intel_gmbus_setup(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- struct intel_gmbus *bus;
unsigned int pin;
int ret;
@@ -880,17 +876,24 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->gmbus_wait_queue);
for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
- if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+ const struct gmbus_pin *gmbus_pin;
+ struct intel_gmbus *bus;
+
+ gmbus_pin = get_gmbus_pin(dev_priv, pin);
+ if (!gmbus_pin)
continue;
- bus = &dev_priv->gmbus[pin];
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err;
+ }
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
- "i915 gmbus %s",
- get_gmbus_pin(dev_priv, pin)->name);
+ "i915 gmbus %s", gmbus_pin->name);
bus->adapter.dev.parent = &pdev->dev;
bus->dev_priv = dev_priv;
@@ -911,11 +914,15 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
if (IS_I830(dev_priv))
bus->force_bit = 1;
- intel_gpio_setup(bus, pin);
+ intel_gpio_setup(bus, GPIO(gmbus_pin->gpio));
ret = i2c_add_adapter(&bus->adapter);
- if (ret)
+ if (ret) {
+ kfree(bus);
goto err;
+ }
+
+ dev_priv->gmbus[pin] = bus;
}
intel_gmbus_reset(dev_priv);
@@ -923,24 +930,19 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
return 0;
err:
- while (pin--) {
- if (!intel_gmbus_is_valid_pin(dev_priv, pin))
- continue;
+ intel_gmbus_teardown(dev_priv);
- bus = &dev_priv->gmbus[pin];
- i2c_del_adapter(&bus->adapter);
- }
return ret;
}
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (drm_WARN_ON(&dev_priv->drm,
- !intel_gmbus_is_valid_pin(dev_priv, pin)))
+ if (drm_WARN_ON(&dev_priv->drm, pin >= ARRAY_SIZE(dev_priv->gmbus) ||
+ !dev_priv->gmbus[pin]))
return NULL;
- return &dev_priv->gmbus[pin].adapter;
+ return &dev_priv->gmbus[pin]->adapter;
}
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
@@ -968,14 +970,18 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
void intel_gmbus_teardown(struct drm_i915_private *dev_priv)
{
- struct intel_gmbus *bus;
unsigned int pin;
for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
- if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+ struct intel_gmbus *bus;
+
+ bus = dev_priv->gmbus[pin];
+ if (!bus)
continue;
- bus = &dev_priv->gmbus[pin];
i2c_del_adapter(&bus->adapter);
+
+ kfree(bus);
+ dev_priv->gmbus[pin] = NULL;
}
}
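
Editor's note: several gmbus hunks above replace open-coded container_of() calls with a to_intel_gmbus() wrapper. The underlying technique recovers a pointer to the containing structure from a pointer to one of its embedded members, by subtracting the member's offset. A standalone demonstration with toy types:

/* Standalone demo of the container_of() pattern behind to_intel_gmbus(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct adapter { int nr; };

struct bus {
	struct adapter adapter; /* embedded member */
	const char *name;
};

static struct bus *to_bus(struct adapter *a)
{
	return container_of(a, struct bus, adapter);
}

int main(void)
{
	struct bus b = { .adapter = { .nr = 5 }, .name = "gmbus dpb" };
	struct adapter *a = &b.adapter; /* all we get from the i2c layer */

	printf("%s (nr=%d)\n", to_bus(a)->name, to_bus(a)->adapter.nr);
	return 0;
}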
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index e1ecf38db0ef..44ac0cee8b77 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -12,7 +12,7 @@
#include <linux/i2c.h>
#include <linux/random.h>
-#include <drm/drm_hdcp.h>
+#include <drm/display/drm_hdcp_helper.h>
#include <drm/i915_component.h>
#include "i915_drv.h"
@@ -20,6 +20,7 @@
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_pcode.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 6512f014cad4..1ae09431f53a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -30,12 +30,14 @@
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <drm/display/drm_hdcp_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/drm_hdcp.h>
-#include <drm/drm_scdc_helper.h>
#include <drm/intel_lpe_audio.h>
#include "i915_debugfs.h"
@@ -2637,7 +2639,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
drm_dbg_kms(&dev_priv->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
connector->base.id, connector->name,
- yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
+ str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
return drm_scdc_set_high_tmds_clock_ratio(adapter,
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 76357c9b76e4..7fbc8031a5aa 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -23,9 +23,9 @@
*
*/
+#include <drm/display/drm_dp_dual_mode_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/dp/drm_dp_dual_mode_helper.h>
-#include <drm/drm_edid.h>
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 9fced37bed70..e8478161f8b9 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -389,7 +389,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_connector, mode);
int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
enum drm_mode_status status;
@@ -475,19 +476,12 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode;
/* use cached edid if we have one */
if (!IS_ERR_OR_NULL(intel_connector->edid))
return drm_add_edid_modes(connector, intel_connector->edid);
- mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode);
- if (mode == NULL)
- return 0;
-
- drm_mode_probed_add(connector, mode);
- return 1;
+ return intel_panel_get_modes(intel_connector);
}
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -786,16 +780,18 @@ bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_device *dev = lvds_encoder->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev);
+ struct intel_connector *connector = lvds_encoder->attached_connector;
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(connector);
unsigned int val;
- struct drm_i915_private *dev_priv = to_i915(dev);
/* use the module option value if specified */
if (dev_priv->params.lvds_channel_mode > 0)
return dev_priv->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
- if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
+ if (fixed_mode->clock > 112999)
return true;
if (dmi_check_system(intel_dual_link_lvds))
@@ -833,8 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_display_mode *fixed_mode = NULL;
- struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
i915_reg_t lvds_reg;
u32 lvds;
@@ -973,35 +967,30 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_connector->edid = edid;
- fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
- if (fixed_mode)
- goto out;
+ /* Try EDID first */
+ intel_panel_add_edid_fixed_modes(intel_connector,
+ dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
/* Failed to get EDID, what about VBT? */
- fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
- if (fixed_mode)
- goto out;
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
/*
- * If we didn't get EDID, try checking if the panel is already turned
- * on. If so, assume that whatever is currently programmed is the
- * correct mode.
+ * If we didn't get a fixed mode from EDID or VBT, try checking
+ * if the panel is already turned on. If so, assume that
+ * whatever is currently programmed is the correct mode.
*/
- fixed_mode = intel_encoder_current_mode(intel_encoder);
- if (fixed_mode) {
- drm_dbg_kms(&dev_priv->drm, "using current (BIOS) mode: ");
- drm_mode_debug_printmodeline(fixed_mode);
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- }
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+ intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder);
+
+ mutex_unlock(&dev->mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
- if (!fixed_mode)
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
goto failed;
-out:
- mutex_unlock(&dev->mode_config.mutex);
+ intel_panel_init(intel_connector);
- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_backlight_setup(intel_connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
@@ -1013,8 +1002,6 @@ out:
return;
failed:
- mutex_unlock(&dev->mode_config.mutex);
-
drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
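
Editor's note: the LVDS init rework above turns the old "return the first fixed mode found" flow into a chain of add_* calls, each guarded by whether a preferred fixed mode exists yet: EDID first, then the VBT LFP mode, then whatever the BIOS left programmed. A hedged sketch of that fallback chain with toy provider functions (not the driver API):

/* Sketch of the EDID -> VBT -> BIOS fixed-mode fallback chain (toy). */
#include <stdbool.h>
#include <stdio.h>

static bool have_mode;

static void add_edid_modes(void) { /* pretend the EDID read failed */ }
static void add_vbt_mode(void)   { have_mode = true; /* VBT had one */ }
static void add_bios_mode(void)  { have_mode = true; }

int main(void)
{
	add_edid_modes();             /* try EDID first */
	if (!have_mode)
		add_vbt_mode();       /* then the VBT LFP mode */
	if (!have_mode)
		add_bios_mode();      /* finally the current (BIOS) mode */

	if (!have_mode) {
		fprintf(stderr, "No LVDS modes found, disabling.\n");
		return 1;
	}
	printf("fixed mode acquired\n");
	return 0;
}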
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 76845d34ad0c..ee46561b5ae8 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -958,19 +958,21 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
static int check_overlay_dst(struct intel_overlay *overlay,
struct drm_intel_overlay_put_image *rec)
{
- const struct intel_crtc_state *pipe_config =
+ const struct intel_crtc_state *crtc_state =
overlay->crtc->config;
+ struct drm_rect req, clipped;
- if (rec->dst_height == 0 || rec->dst_width == 0)
- return -EINVAL;
+ drm_rect_init(&req, rec->dst_x, rec->dst_y,
+ rec->dst_width, rec->dst_height);
- if (rec->dst_x < pipe_config->pipe_src_w &&
- rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
- rec->dst_y < pipe_config->pipe_src_h &&
- rec->dst_y + rec->dst_height <= pipe_config->pipe_src_h)
- return 0;
- else
+ clipped = req;
+ drm_rect_intersect(&clipped, &crtc_state->pipe_src);
+
+ if (!drm_rect_visible(&clipped) ||
+ !drm_rect_equals(&clipped, &req))
return -EINVAL;
+
+ return 0;
}
static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
@@ -1160,7 +1162,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
crtc->overlay = overlay;
/* line too wide, i.e. one-line-mode */
- if (crtc->config->pipe_src_w > 1024 &&
+ if (drm_rect_width(&crtc->config->pipe_src) > 1024 &&
crtc->config->gmch_pfit.control & PFIT_ENABLE) {
overlay->pfit_active = true;
update_pfit_vscale_ratio(overlay);
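
Editor's note: check_overlay_dst() above now validates the destination by intersecting the requested rectangle with the pipe source and demanding that the intersection be both visible and identical to the request; that single comparison subsumes the four manual bound checks it replaces, including the implicit rejection of zero-sized rectangles. A standalone version of the same test:

/* Standalone version of the intersect-and-compare bounds check above. */
#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static struct rect intersect(struct rect a, struct rect b)
{
	struct rect r = {
		a.x1 > b.x1 ? a.x1 : b.x1, a.y1 > b.y1 ? a.y1 : b.y1,
		a.x2 < b.x2 ? a.x2 : b.x2, a.y2 < b.y2 ? a.y2 : b.y2,
	};
	return r;
}

static bool visible(struct rect r) { return r.x1 < r.x2 && r.y1 < r.y2; }

static bool equals(struct rect a, struct rect b)
{
	return a.x1 == b.x1 && a.y1 == b.y1 && a.x2 == b.x2 && a.y2 == b.y2;
}

int main(void)
{
	struct rect src = { 0, 0, 1920, 1080 };
	struct rect req = { 100, 100, 740, 580 };    /* fully inside: ok */
	struct rect bad = { 1800, 900, 2200, 1300 }; /* clipped: rejected */
	struct rect c;

	c = intersect(req, src);
	printf("req: %s\n", visible(c) && equals(c, req) ? "ok" : "-EINVAL");
	c = intersect(bad, src);
	printf("bad: %s\n", visible(c) && equals(c, bad) ? "ok" : "-EINVAL");
	return 0;
}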
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index a0c8e43db5eb..d1d1b59102d6 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -35,6 +35,7 @@
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_drrs.h"
#include "intel_panel.h"
bool intel_panel_use_ssc(struct drm_i915_private *i915)
@@ -45,10 +46,87 @@ bool intel_panel_use_ssc(struct drm_i915_private *i915)
&& !(i915->quirks & QUIRK_LVDS_SSC_DISABLE);
}
+const struct drm_display_mode *
+intel_panel_preferred_fixed_mode(struct intel_connector *connector)
+{
+ return list_first_entry_or_null(&connector->panel.fixed_modes,
+ struct drm_display_mode, head);
+}
+
+const struct drm_display_mode *
+intel_panel_fixed_mode(struct intel_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ const struct drm_display_mode *fixed_mode, *best_mode = NULL;
+ int vrefresh = drm_mode_vrefresh(mode);
+
+ /* pick the fixed_mode that is closest in terms of vrefresh */
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ if (!best_mode ||
+ abs(drm_mode_vrefresh(fixed_mode) - vrefresh) <
+ abs(drm_mode_vrefresh(best_mode) - vrefresh))
+ best_mode = fixed_mode;
+ }
+
+ return best_mode;
+}
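
Editor's note: intel_panel_fixed_mode() above scans the new fixed-mode list for the entry whose vertical refresh is closest to the requested mode's — a plain minimum-distance scan, with ties keeping the earlier entry. A self-contained sketch over an array instead of a kernel list:

/* Minimum-distance scan behind intel_panel_fixed_mode() (illustrative). */
#include <stdio.h>
#include <stdlib.h>

static int pick_closest_vrefresh(const int *vrefresh, int n, int want)
{
	int best = -1;

	for (int i = 0; i < n; i++)
		if (best < 0 ||
		    abs(vrefresh[i] - want) < abs(vrefresh[best] - want))
			best = i;

	return best; /* index of fixed mode to use, -1 if the list is empty */
}

int main(void)
{
	int modes[] = { 40, 60, 120 }; /* Hz, e.g. a multi-refresh eDP panel */

	printf("want 59 -> %d Hz\n", modes[pick_closest_vrefresh(modes, 3, 59)]);
	printf("want 90 -> %d Hz\n", modes[pick_closest_vrefresh(modes, 3, 90)]);
	return 0;
}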
+
+const struct drm_display_mode *
+intel_panel_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode, *best_mode = NULL;
+ int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate;
+ int max_vrefresh = drm_mode_vrefresh(adjusted_mode);
+
+ /* pick the fixed_mode with the lowest refresh rate */
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ int vrefresh = drm_mode_vrefresh(fixed_mode);
+
+ if (vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
+ max_vrefresh = vrefresh;
+ best_mode = fixed_mode;
+ }
+ }
+
+ return best_mode;
+}
+
+int intel_panel_get_modes(struct intel_connector *connector)
+{
+ const struct drm_display_mode *fixed_mode;
+ int num_modes = 0;
+
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->base.dev, fixed_mode);
+ if (mode) {
+ drm_mode_probed_add(&connector->base, mode);
+ num_modes++;
+ }
+ }
+
+ return num_modes;
+}
+
+enum drrs_type intel_panel_drrs_type(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (list_empty(&connector->panel.fixed_modes) ||
+ list_is_singular(&connector->panel.fixed_modes))
+ return DRRS_TYPE_NONE;
+
+ return i915->vbt.drrs_type;
+}
+
int intel_panel_compute_config(struct intel_connector *connector,
struct drm_display_mode *adjusted_mode)
{
- const struct drm_display_mode *fixed_mode = connector->panel.fixed_mode;
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(connector, adjusted_mode);
if (!fixed_mode)
return 0;
@@ -75,128 +153,142 @@ int intel_panel_compute_config(struct intel_connector *connector,
return 0;
}
-static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
- const struct drm_display_mode *fixed_mode)
+static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
{
- return drm_mode_match(downclock_mode, fixed_mode,
+ return drm_mode_match(mode, preferred_mode,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS) &&
- downclock_mode->clock < fixed_mode->clock;
+ mode->clock != preferred_mode->clock;
}
-struct drm_display_mode *
-intel_panel_edid_downclock_mode(struct intel_connector *connector,
- const struct drm_display_mode *fixed_mode)
+static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- const struct drm_display_mode *scan, *best_mode = NULL;
- struct drm_display_mode *downclock_mode;
- int best_clock = fixed_mode->clock;
+ const struct drm_display_mode *preferred_mode =
+ intel_panel_preferred_fixed_mode(connector);
+ struct drm_display_mode *mode, *next;
- list_for_each_entry(scan, &connector->base.probed_modes, head) {
- /*
- * If one mode has the same resolution with the fixed_panel
- * mode while they have the different refresh rate, it means
- * that the reduced downclock is found. In such
- * case we can set the different FPx0/1 to dynamically select
- * between low and high frequency.
- */
- if (is_downclock_mode(scan, fixed_mode) &&
- scan->clock < best_clock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- best_clock = scan->clock;
- best_mode = scan;
- }
- }
-
- if (!best_mode)
- return NULL;
-
- downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode);
- if (!downclock_mode)
- return NULL;
+ list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
+ if (!is_alt_fixed_mode(mode, preferred_mode))
+ continue;
- drm_dbg_kms(&dev_priv->drm,
- "[CONNECTOR:%d:%s] using downclock mode from EDID: ",
- connector->base.base.id, connector->base.name);
- drm_mode_debug_printmodeline(downclock_mode);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using alternate EDID fixed mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ DRM_MODE_ARG(mode));
- return downclock_mode;
+ list_move_tail(&mode->head, &connector->panel.fixed_modes);
+ }
}
-struct drm_display_mode *
-intel_panel_edid_fixed_mode(struct intel_connector *connector)
+static void intel_panel_add_edid_preferred_mode(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- const struct drm_display_mode *scan;
- struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
if (list_empty(&connector->base.probed_modes))
- return NULL;
+ return;
- /* prefer fixed mode from EDID if available */
+ /* make sure the preferred mode is first */
list_for_each_entry(scan, &connector->base.probed_modes, head) {
- if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0)
- continue;
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ fixed_mode = scan;
+ break;
+ }
+ }
- fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
- if (!fixed_mode)
- return NULL;
+ if (!fixed_mode)
+ fixed_mode = list_first_entry(&connector->base.probed_modes,
+ typeof(*fixed_mode), head);
- drm_dbg_kms(&dev_priv->drm,
- "[CONNECTOR:%d:%s] using preferred mode from EDID: ",
- connector->base.base.id, connector->base.name);
- drm_mode_debug_printmodeline(fixed_mode);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using %s EDID fixed mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ fixed_mode->type & DRM_MODE_TYPE_PREFERRED ? "preferred" : "first",
+ DRM_MODE_ARG(fixed_mode));
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- return fixed_mode;
+ list_move_tail(&fixed_mode->head, &connector->panel.fixed_modes);
+}
+
+static void intel_panel_destroy_probed_modes(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_display_mode *mode, *next;
+
+ list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
+ list_del(&mode->head);
+ drm_mode_destroy(&i915->drm, mode);
}
+}
- scan = list_first_entry(&connector->base.probed_modes,
- typeof(*scan), head);
+void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, bool has_drrs)
+{
+ intel_panel_add_edid_preferred_mode(connector);
+ if (intel_panel_preferred_fixed_mode(connector) && has_drrs)
+ intel_panel_add_edid_alt_fixed_modes(connector);
+ intel_panel_destroy_probed_modes(connector);
+}
+
+static void intel_panel_add_fixed_mode(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode,
+ const char *type)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_display_info *info = &connector->base.display_info;
- fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
if (!fixed_mode)
- return NULL;
+ return;
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
- drm_dbg_kms(&dev_priv->drm,
- "[CONNECTOR:%d:%s] using first mode from EDID: ",
- connector->base.base.id, connector->base.name);
- drm_mode_debug_printmodeline(fixed_mode);
+ info->width_mm = fixed_mode->width_mm;
+ info->height_mm = fixed_mode->height_mm;
- return fixed_mode;
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name, type,
+ DRM_MODE_ARG(fixed_mode));
+
+ list_add_tail(&fixed_mode->head, &connector->panel.fixed_modes);
}
-struct drm_display_mode *
-intel_panel_vbt_fixed_mode(struct intel_connector *connector)
+void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct drm_display_info *info = &connector->base.display_info;
- struct drm_display_mode *fixed_mode;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *mode;
- if (!dev_priv->vbt.lfp_lvds_vbt_mode)
- return NULL;
+ mode = i915->vbt.lfp_lvds_vbt_mode;
+ if (!mode)
+ return;
- fixed_mode = drm_mode_duplicate(&dev_priv->drm,
- dev_priv->vbt.lfp_lvds_vbt_mode);
- if (!fixed_mode)
- return NULL;
+ intel_panel_add_fixed_mode(connector,
+ drm_mode_duplicate(&i915->drm, mode),
+ "VBT LFP");
+}
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *mode;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] using mode from VBT: ",
- connector->base.base.id, connector->base.name);
- drm_mode_debug_printmodeline(fixed_mode);
+ mode = i915->vbt.sdvo_lvds_vbt_mode;
+ if (!mode)
+ return;
- info->width_mm = fixed_mode->width_mm;
- info->height_mm = fixed_mode->height_mm;
+ intel_panel_add_fixed_mode(connector,
+ drm_mode_duplicate(&i915->drm, mode),
+ "VBT SDVO");
+}
- return fixed_mode;
+void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ intel_panel_add_fixed_mode(connector,
+ intel_encoder_current_mode(encoder),
+ "current (BIOS)");
}
/* adjusted_mode has been preset to be the panel's fixed mode */
@@ -205,18 +297,20 @@ static int pch_panel_fitting(struct intel_crtc_state *crtc_state,
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
int x, y, width, height;
/* Native modes don't need fitting */
- if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
- adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h &&
+ if (adjusted_mode->crtc_hdisplay == pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == pipe_src_h &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
return 0;
switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
- width = crtc_state->pipe_src_w;
- height = crtc_state->pipe_src_h;
+ width = pipe_src_w;
+ height = pipe_src_h;
x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
break;
@@ -224,19 +318,17 @@ static int pch_panel_fitting(struct intel_crtc_state *crtc_state,
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
{
- u32 scaled_width = adjusted_mode->crtc_hdisplay
- * crtc_state->pipe_src_h;
- u32 scaled_height = crtc_state->pipe_src_w
- * adjusted_mode->crtc_vdisplay;
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
if (scaled_width > scaled_height) { /* pillar */
- width = scaled_height / crtc_state->pipe_src_h;
+ width = scaled_height / pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->crtc_vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
- height = scaled_width / crtc_state->pipe_src_w;
+ height = scaled_width / pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
@@ -251,8 +343,8 @@ static int pch_panel_fitting(struct intel_crtc_state *crtc_state,
break;
case DRM_MODE_SCALE_NONE:
- WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w);
- WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h);
+ WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w);
+ WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h);
fallthrough;
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
@@ -333,10 +425,10 @@ static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 scaled_width = adjusted_mode->crtc_hdisplay *
- crtc_state->pipe_src_h;
- u32 scaled_height = crtc_state->pipe_src_w *
- adjusted_mode->crtc_vdisplay;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
@@ -345,7 +437,7 @@ static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
else if (scaled_width < scaled_height)
*pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
- else if (adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w)
+ else if (adjusted_mode->crtc_hdisplay != pipe_src_w)
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
@@ -354,10 +446,10 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *border)
{
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- u32 scaled_width = adjusted_mode->crtc_hdisplay *
- crtc_state->pipe_src_h;
- u32 scaled_height = crtc_state->pipe_src_w *
- adjusted_mode->crtc_vdisplay;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
u32 bits;
/*
@@ -367,12 +459,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
*/
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
- scaled_height /
- crtc_state->pipe_src_h);
+ scaled_height / pipe_src_h);
*border = LVDS_BORDER_ENABLE;
- if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay) {
- bits = panel_fitter_scaling(crtc_state->pipe_src_h,
+ if (pipe_src_h != adjusted_mode->crtc_vdisplay) {
+ bits = panel_fitter_scaling(pipe_src_h,
adjusted_mode->crtc_vdisplay);
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -383,12 +474,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
}
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
- scaled_width /
- crtc_state->pipe_src_w);
+ scaled_width / pipe_src_w);
*border = LVDS_BORDER_ENABLE;
- if (crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
- bits = panel_fitter_scaling(crtc_state->pipe_src_w,
+ if (pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ bits = panel_fitter_scaling(pipe_src_w,
adjusted_mode->crtc_hdisplay);
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -413,10 +503,12 @@ static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
/* Native modes don't need fitting */
- if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
- adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h)
+ if (adjusted_mode->crtc_hdisplay == pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == pipe_src_h)
goto out;
switch (conn_state->scaling_mode) {
@@ -425,8 +517,8 @@ static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
- centre_horizontally(adjusted_mode, crtc_state->pipe_src_w);
- centre_vertically(adjusted_mode, crtc_state->pipe_src_h);
+ centre_horizontally(adjusted_mode, pipe_src_w);
+ centre_vertically(adjusted_mode, pipe_src_h);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
@@ -442,8 +534,8 @@ static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay ||
- crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ if (pipe_src_h != adjusted_mode->crtc_vdisplay ||
+ pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
if (DISPLAY_VER(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
@@ -508,7 +600,8 @@ enum drm_mode_status
intel_panel_mode_valid(struct intel_connector *connector,
const struct drm_display_mode *mode)
{
- const struct drm_display_mode *fixed_mode = connector->panel.fixed_mode;
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(connector, mode);
if (!fixed_mode)
return MODE_OK;
@@ -525,29 +618,29 @@ intel_panel_mode_valid(struct intel_connector *connector,
return MODE_OK;
}
-int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode,
- struct drm_display_mode *downclock_mode)
+int intel_panel_init(struct intel_connector *connector)
{
+ struct intel_panel *panel = &connector->panel;
+
intel_backlight_init_funcs(panel);
- panel->fixed_mode = fixed_mode;
- panel->downclock_mode = downclock_mode;
+ drm_dbg_kms(connector->base.dev,
+ "[CONNECTOR:%d:%s] DRRS type: %s\n",
+ connector->base.base.id, connector->base.name,
+ intel_drrs_type_str(intel_panel_drrs_type(connector)));
return 0;
}
-void intel_panel_fini(struct intel_panel *panel)
+void intel_panel_fini(struct intel_connector *connector)
{
- struct intel_connector *intel_connector =
- container_of(panel, struct intel_connector, panel);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_display_mode *fixed_mode, *next;
intel_backlight_destroy(panel);
- if (panel->fixed_mode)
- drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
-
- if (panel->downclock_mode)
- drm_mode_destroy(intel_connector->base.dev,
- panel->downclock_mode);
+ list_for_each_entry_safe(fixed_mode, next, &panel->fixed_modes, head) {
+ list_del(&fixed_mode->head);
+ drm_mode_destroy(connector->base.dev, fixed_mode);
+ }
}
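
The panel-fitting hunks above repeatedly compare adjusted_mode->crtc_hdisplay * pipe_src_h against pipe_src_w * adjusted_mode->crtc_vdisplay. A minimal standalone sketch of that cross-multiplication trick (illustrative only, not part of the patch): comparing the two products compares the two aspect ratios without integer division, and the quotient then yields the active width or height for the pillarbox/letterbox case.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hdisplay = 1920, vdisplay = 1080;	/* panel fixed mode */
	uint32_t src_w = 1280, src_h = 1024;		/* pipe source size */
	uint64_t scaled_w = (uint64_t)hdisplay * src_h;
	uint64_t scaled_h = (uint64_t)src_w * vdisplay;

	if (scaled_w > scaled_h) {
		/* pillarbox: scale to full height, pad left/right */
		uint32_t width = scaled_h / src_h; /* = src_w * vdisplay / src_h */
		printf("pillarbox, active width %u of %u\n", width, hdisplay);
	} else if (scaled_w < scaled_h) {
		/* letterbox: scale to full width, pad top/bottom */
		uint32_t height = scaled_w / src_w;
		printf("letterbox, active height %u of %u\n", height, vdisplay);
	} else {
		printf("aspect ratios match, no borders needed\n");
	}
	return 0;
}

For the example values this prints an active width of 1350, matching what the pch_panel_fitting() pillarbox branch above would program (before its odd-width rounding).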
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index d50b3f7e9e58..2e32bb728beb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -9,23 +9,30 @@
#include <linux/types.h>
enum drm_connector_status;
+enum drrs_type;
struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
-struct intel_panel;
+struct intel_encoder;
-int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode,
- struct drm_display_mode *downclock_mode);
-void intel_panel_fini(struct intel_panel *panel);
+int intel_panel_init(struct intel_connector *connector);
+void intel_panel_fini(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
bool intel_panel_use_ssc(struct drm_i915_private *i915);
-void intel_panel_fixed_mode(const struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode);
+const struct drm_display_mode *
+intel_panel_preferred_fixed_mode(struct intel_connector *connector);
+const struct drm_display_mode *
+intel_panel_fixed_mode(struct intel_connector *connector,
+ const struct drm_display_mode *mode);
+const struct drm_display_mode *
+intel_panel_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode);
+int intel_panel_get_modes(struct intel_connector *connector);
+enum drrs_type intel_panel_drrs_type(struct intel_connector *connector);
enum drm_mode_status
intel_panel_mode_valid(struct intel_connector *connector,
const struct drm_display_mode *mode);
@@ -33,12 +40,10 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_panel_compute_config(struct intel_connector *connector,
struct drm_display_mode *adjusted_mode);
-struct drm_display_mode *
-intel_panel_edid_downclock_mode(struct intel_connector *connector,
- const struct drm_display_mode *fixed_mode);
-struct drm_display_mode *
-intel_panel_edid_fixed_mode(struct intel_connector *connector);
-struct drm_display_mode *
-intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, bool has_drrs);
+void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector);
+void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
+void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
+ struct intel_encoder *encoder);
#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 9192769e3337..837152dca063 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -14,6 +14,23 @@
#include "intel_pps.h"
#include "intel_sdvo.h"
+bool intel_has_pch_transcoder(struct drm_i915_private *i915,
+ enum pipe pch_transcoder)
+{
+ return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
+ (HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A);
+}
+
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (HAS_PCH_LPT(i915))
+ return PIPE_A;
+ else
+ return crtc->pipe;
+}
+
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
@@ -88,6 +105,67 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t hdmi_reg)
+{
+ u32 val = intel_de_read(dev_priv, hdmi_reg);
+
+ if (val & SDVO_ENABLE ||
+ (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for HDMI %c\n",
+ port_name(port));
+
+ val &= ~SDVO_PIPE_SEL_MASK;
+ val |= SDVO_PIPE_SEL(PIPE_A);
+
+ intel_de_write(dev_priv, hdmi_reg, val);
+}
+
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t dp_reg)
+{
+ u32 val = intel_de_read(dev_priv, dp_reg);
+
+ if (val & DP_PORT_EN ||
+ (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for DP %c\n",
+ port_name(port));
+
+ val &= ~DP_PIPE_SEL_MASK;
+ val |= DP_PIPE_SEL(PIPE_A);
+
+ intel_de_write(dev_priv, dp_reg, val);
+}
+
+static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The BIOS may select transcoder B on some of the PCH
+ * ports even if it doesn't enable the port. This would trip
+ * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
+ * Sanitize the transcoder select bits to prevent that. We
+ * assume that the BIOS never actually enabled the port,
+ * because if it did we'd actually have to toggle the port
+ * on and back off to make the transcoder A select stick
+ * (see. intel_dp_link_down(), intel_disable_hdmi(),
+ * intel_disable_sdvo()).
+ */
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+
+ /* PCH SDVOB multiplex with HDMIB */
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+}
+
static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
const struct intel_link_m_n *m_n)
{
@@ -181,7 +259,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -192,7 +270,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+ val |= TRANS_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
/*
* Make the BPC in transcoder be consistent with
@@ -466,9 +544,11 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
ilk_pch_clock_get(crtc_state);
}
-static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
+static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val, pipeconf_val;
/* FDI must be feeding us bits for PCH ports */
@@ -480,7 +560,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
@@ -521,7 +601,6 @@ void lpt_pch_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
assert_pch_transcoder_disabled(dev_priv, PIPE_A);
@@ -530,7 +609,7 @@ void lpt_pch_enable(struct intel_atomic_state *state,
/* Set transcoder timing. */
ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
- lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+ lpt_enable_pch_transcoder(crtc_state);
}
void lpt_pch_disable(struct intel_atomic_state *state,
@@ -563,3 +642,9 @@ void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}
+
+void intel_pch_sanitize(struct drm_i915_private *i915)
+{
+ if (HAS_PCH_IBX(i915))
+ ibx_sanitize_pch_ports(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h
index 749473d99320..41a63413cb3d 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.h
@@ -6,11 +6,19 @@
#ifndef _INTEL_PCH_DISPLAY_H_
#define _INTEL_PCH_DISPLAY_H_
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_link_m_n;
+bool intel_has_pch_transcoder(struct drm_i915_private *i915,
+ enum pipe pch_transcoder);
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
+
void ilk_pch_pre_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void ilk_pch_enable(struct intel_atomic_state *state,
@@ -32,4 +40,6 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
+void intel_pch_sanitize(struct drm_i915_private *i915);
+
#endif
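
A minimal model of the mapping the two new helpers encode (an illustration, not driver code): LPT routes every pipe through the single PCH transcoder, which always behaves as transcoder A, while IBX/CPT pair one PCH transcoder with each pipe.

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

static enum pipe pch_transcoder_for(int has_pch_lpt, enum pipe pipe)
{
	/* same shape as intel_crtc_pch_transcoder() above */
	return has_pch_lpt ? PIPE_A : pipe;
}

int main(void)
{
	printf("LPT, pipe B -> PCH transcoder %c\n",
	       'A' + pch_transcoder_for(1, PIPE_B));
	printf("CPT, pipe B -> PCH transcoder %c\n",
	       'A' + pch_transcoder_for(0, PIPE_B));
	return 0;
}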
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index d7b1de4cc205..d10f27d0b7b0 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_display.h"
@@ -46,16 +47,55 @@ static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
- struct intel_memory_region *mem = i915->mm.stolen_region;
+ struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ resource_size_t phys_base;
u32 base, size;
+ u64 pinctl;
- if (!mem || plane_config->size == 0)
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
+ if (IS_DGFX(i915)) {
+ gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ gen8_pte_t pte;
+
+ gte += base / I915_GTT_PAGE_SIZE;
+
+ pte = ioread64(gte);
+ if (!(pte & GEN12_GGTT_PTE_LM)) {
+ drm_err(&i915->drm,
+ "Initial plane programming missing PTE_LM bit\n");
+ return NULL;
+ }
+
+ phys_base = pte & I915_GTT_PAGE_MASK;
+ mem = i915->mm.regions[INTEL_REGION_LMEM_0];
+
+ /*
+ * We don't currently expect this to ever be placed in the
+ * stolen portion.
+ */
+ if (phys_base >= resource_size(&mem->region)) {
+ drm_err(&i915->drm,
+ "Initial plane programming using invalid range, phys_base=%pa\n",
+ &phys_base);
+ return NULL;
+ }
+
+ drm_dbg(&i915->drm,
+ "Using phys_base=%pa, based on initial plane programming\n",
+ &phys_base);
+ } else {
+ phys_base = base;
+ mem = i915->mm.stolen_region;
+ }
+
+ if (!mem)
return NULL;
- base = round_down(plane_config->base,
- I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
mem->min_page_size);
size -= base;
@@ -66,10 +106,11 @@ initial_plane_vma(struct drm_i915_private *i915,
* features.
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ mem == i915->mm.stolen_region &&
size * 2 > i915->stolen_usable_size)
return NULL;
- obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+ obj = i915_gem_object_create_region_at(mem, phys_base, size, 0);
if (IS_ERR(obj))
return NULL;
@@ -99,7 +140,10 @@ initial_plane_vma(struct drm_i915_private *i915,
if (IS_ERR(vma))
goto err_obj;
- if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base;
+ if (HAS_GMCH(i915))
+ pinctl |= PIN_MAPPABLE;
+ if (i915_vma_pin(vma, 0, 0, pinctl))
goto err_obj;
if (i915_gem_object_is_tiled(obj) &&
@@ -127,6 +171,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_4_TILED:
break;
default:
drm_dbg(&dev_priv->drm,
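
On discrete parts the stolen region can no longer be assumed, so the initial_plane_vma() hunk above recovers the physical base of the firmware framebuffer by reading back the GGTT PTE it was mapped with. A standalone sketch of that decode (the mask and bit values here are illustrative stand-ins for the i915 GTT header definitions):

#include <stdint.h>
#include <stdio.h>

#define GEN12_GGTT_PTE_LM	(1ull << 1)	/* local-memory bit, per the check above */
#define GTT_PAGE_MASK		(~0ull << 12)	/* assumed 4K page-aligned address field */

int main(void)
{
	/* example raw PTE value: address | LM | valid */
	uint64_t pte = 0x180000ull | GEN12_GGTT_PTE_LM | 1;

	if (!(pte & GEN12_GGTT_PTE_LM)) {
		fprintf(stderr, "initial plane PTE missing LM bit\n");
		return 1;
	}
	printf("phys_base = 0x%llx\n",
	       (unsigned long long)(pte & GTT_PAGE_MASK));
	return 0;
}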
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 64bd4ca0edd4..5a598dd06039 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -6,6 +6,7 @@
#include "g4x_dp.h"
#include "i915_drv.h"
#include "intel_de.h"
+#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpll.h"
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6c9e6e7f0afd..06db407e2749 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -100,11 +100,15 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
+ if (i915->params.enable_psr == 1)
+ return false;
return true;
}
}
@@ -1221,6 +1225,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.dc3co_exit_delay = val;
intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
@@ -1348,6 +1353,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
intel_dp->psr.enabled = false;
+ intel_dp->psr.psr2_enabled = false;
+ intel_dp->psr.psr2_sel_fetch_enabled = false;
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
/**
@@ -1436,28 +1444,42 @@ unlock:
mutex_unlock(&psr->lock);
}
-static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ? 0 : PSR2_MAN_TRK_CTL_ENABLE;
+}
+
+static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
{
return IS_ALDERLAKE_P(dev_priv) ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
-static inline u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
{
return IS_ALDERLAKE_P(dev_priv) ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
+static u32 man_trk_ctl_continuous_full_frame(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
+ PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
+}
+
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (intel_dp->psr.psr2_sel_fetch_enabled)
- intel_de_rmw(dev_priv,
- PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0,
- man_trk_ctl_single_full_frame_bit_get(dev_priv));
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ man_trk_ctl_enable_bit_get(dev_priv) |
+ man_trk_ctl_partial_frame_bit_get(dev_priv) |
+ man_trk_ctl_single_full_frame_bit_get(dev_priv));
/*
* Display WA #0884: skl+
@@ -1541,10 +1563,21 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
if (!crtc_state->enable_psr2_sel_fetch)
return;
+ for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ lockdep_assert_held(&intel_dp->psr.lock);
+ if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+ return;
+ break;
+ }
+
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
crtc_state->psr2_man_track_ctl);
}
@@ -1554,10 +1587,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val = 0;
-
- if (!IS_ALDERLAKE_P(dev_priv))
- val = PSR2_MAN_TRK_CTL_ENABLE;
+ u32 val = man_trk_ctl_enable_bit_get(dev_priv);
/* SF partial frame enable has to be set even on full update */
val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
@@ -1915,13 +1945,13 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
}
/**
- * intel_psr_wait_for_idle - wait for PSR be ready for a pipe update
+ * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
* @new_crtc_state: new CRTC state
*
* This function is expected to be called from pipe_update_start() where it is
* not expected to race with PSR enable or disable.
*/
-void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
struct intel_encoder *encoder;
@@ -1934,12 +1964,10 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int ret;
- mutex_lock(&intel_dp->psr.lock);
+ lockdep_assert_held(&intel_dp->psr.lock);
- if (!intel_dp->psr.enabled) {
- mutex_unlock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled)
continue;
- }
if (intel_dp->psr.psr2_enabled)
ret = _psr2_ready_for_pipe_update_locked(intel_dp);
@@ -1948,8 +1976,6 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
if (ret)
drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
-
- mutex_unlock(&intel_dp->psr.lock);
}
}
@@ -2126,6 +2152,27 @@ unlock:
mutex_unlock(&intel_dp->psr.lock);
}
+static void _psr_invalidate_handle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ u32 val;
+
+ if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+ return;
+
+ val = man_trk_ctl_enable_bit_get(dev_priv) |
+ man_trk_ctl_partial_frame_bit_get(dev_priv) |
+ man_trk_ctl_continuous_full_frame(dev_priv);
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
+ } else {
+ intel_psr_exit(intel_dp);
+ }
+}
+
/**
 * intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
@@ -2162,7 +2209,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
if (pipe_frontbuffer_bits)
- intel_psr_exit(intel_dp);
+ _psr_invalidate_handle(intel_dp);
mutex_unlock(&intel_dp->psr.lock);
}
@@ -2194,6 +2241,42 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
intel_dp->psr.dc3co_exit_delay);
}
+static void _psr_flush_handle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+ /* can we turn CFF off? */
+ if (intel_dp->psr.busy_frontbuffer_bits == 0) {
+ u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
+ man_trk_ctl_partial_frame_bit_get(dev_priv) |
+ man_trk_ctl_single_full_frame_bit_get(dev_priv);
+
+ /*
+ * turn continuous full frame off and do a single
+ * full frame
+ */
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ val);
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ }
+ } else {
+ /*
+ * continuous full frame is disabled, only a single full
+ * frame is required
+ */
+ psr_force_hw_tracking_exit(intel_dp);
+ }
+ } else {
+ psr_force_hw_tracking_exit(intel_dp);
+
+ if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
+ schedule_work(&intel_dp->psr.work);
+ }
+}
+
/**
* intel_psr_flush - Flush PSR
* @dev_priv: i915 device
@@ -2231,25 +2314,22 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
* we have to ensure that the PSR is not activated until
* intel_psr_resume() is called.
*/
- if (intel_dp->psr.paused) {
- mutex_unlock(&intel_dp->psr.lock);
- continue;
- }
+ if (intel_dp->psr.paused)
+ goto unlock;
if (origin == ORIGIN_FLIP ||
(origin == ORIGIN_CURSOR_UPDATE &&
!intel_dp->psr.psr2_sel_fetch_enabled)) {
tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
- mutex_unlock(&intel_dp->psr.lock);
- continue;
+ goto unlock;
}
- /* By definition flush = invalidate + flush */
- if (pipe_frontbuffer_bits)
- psr_force_hw_tracking_exit(intel_dp);
+ if (pipe_frontbuffer_bits == 0)
+ goto unlock;
- if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
- schedule_work(&intel_dp->psr.work);
+ /* By definition flush = invalidate + flush */
+ _psr_flush_handle(intel_dp);
+unlock:
mutex_unlock(&intel_dp->psr.lock);
}
}
@@ -2440,3 +2520,51 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
return ret;
}
+
+/**
+ * intel_psr_lock - grab PSR lock
+ * @crtc_state: the crtc state
+ *
+ * This is initially meant to be used around CRTC updates, when
+ * vblank-sensitive registers are updated and we need to grab the lock
+ * beforehand to avoid vblank evasion.
+ */
+void intel_psr_lock(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ break;
+ }
+}
+
+/**
+ * intel_psr_unlock - release PSR lock
+ * @crtc_state: the crtc state
+ *
+ * Release the PSR lock that was held during pipe update.
+ */
+void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ break;
+ }
+}
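
Taken together, the new _psr_invalidate_handle()/_psr_flush_handle() pair implements a small state machine for selective fetch: invalidate arms continuous full frame (CFF) fetches while the frontbuffer is busy, and flush drops back to a single full frame once the busy bits clear. A minimal userspace model of that handshake (an illustration, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

struct psr_model {
	bool sel_fetch_enabled;
	bool cff_enabled;
	unsigned int busy_frontbuffer_bits;
};

static void invalidate(struct psr_model *psr, unsigned int bits)
{
	psr->busy_frontbuffer_bits |= bits;
	if (psr->sel_fetch_enabled && !psr->cff_enabled) {
		/* set the continuous-full-frame bit in MAN_TRK_CTL */
		psr->cff_enabled = true;
		printf("CFF on\n");
	}
}

static void flush(struct psr_model *psr, unsigned int bits)
{
	psr->busy_frontbuffer_bits &= ~bits;
	if (psr->sel_fetch_enabled && psr->cff_enabled &&
	    psr->busy_frontbuffer_bits == 0) {
		/* CFF off, do one single full frame instead */
		psr->cff_enabled = false;
		printf("CFF off, one full frame\n");
	}
}

int main(void)
{
	struct psr_model psr = { .sel_fetch_enabled = true };

	invalidate(&psr, 0x1);	/* CPU starts drawing to the frontbuffer */
	flush(&psr, 0x1);	/* drawing done, PSR may re-enter */
	return 0;
}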
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index f6526d9ccfdc..2ac3a46cccc5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -41,7 +41,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
-void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
+void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state);
bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@@ -55,4 +55,7 @@ void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
+void intel_psr_lock(const struct intel_crtc_state *crtc_state);
+void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
+
#endif /* __INTEL_PSR_H__ */
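
A sketch of the calling pattern the new lock/unlock entry points are meant for (hypothetical caller, assuming the pipe-update path described in the intel_psr_lock() kernel-doc): the PSR mutex is taken before the vblank-evasion critical section, intel_psr_wait_for_idle_locked() runs under it, and it is released after the arming registers are written.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t psr_lock = PTHREAD_MUTEX_INITIALIZER;

static void pipe_update(void)
{
	pthread_mutex_lock(&psr_lock);	  /* stands in for intel_psr_lock() */
	/* intel_psr_wait_for_idle_locked() would poll PSR status here */
	printf("arming plane registers under the PSR lock\n");
	pthread_mutex_unlock(&psr_lock);  /* stands in for intel_psr_unlock() */
}

int main(void)
{
	pipe_update();
	return 0;
}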
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index c626a24fe98f..6f8e4ec5c0fb 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -3,7 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/drm_dsc.h>
+#include <drm/display/drm_dsc.h>
#include "i915_utils.h"
#include "intel_qp_tables.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 76e1188b01d4..d81855d57cdc 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -31,6 +31,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -283,7 +284,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
static const struct {
u8 cmd;
const char *name;
-} __attribute__ ((packed)) sdvo_cmd_names[] = {
+} __packed sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(RESET),
SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV),
@@ -783,24 +784,22 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
- u16 clock,
- u16 width,
- u16 height)
+ const struct drm_display_mode *mode)
{
struct intel_sdvo_preferred_input_timing_args args;
memset(&args, 0, sizeof(args));
- args.clock = clock;
- args.width = width;
- args.height = height;
+ args.clock = mode->clock / 10;
+ args.width = mode->hdisplay;
+ args.height = mode->vdisplay;
args.interlace = 0;
if (IS_LVDS(intel_sdvo_connector)) {
const struct drm_display_mode *fixed_mode =
- intel_sdvo_connector->base.panel.fixed_mode;
+ intel_panel_fixed_mode(&intel_sdvo_connector->base, mode);
- if (fixed_mode->hdisplay != width ||
- fixed_mode->vdisplay != height)
+ if (fixed_mode->hdisplay != args.width ||
+ fixed_mode->vdisplay != args.height)
args.scaled = 1;
}
@@ -1236,9 +1235,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
intel_sdvo_connector,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay))
+ mode))
return false;
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
@@ -1335,6 +1332,8 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
adjusted_mode);
pipe_config->sdvo_tv_clock = true;
} else if (IS_LVDS(intel_sdvo_connector)) {
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(&intel_sdvo_connector->base, mode);
int ret;
ret = intel_panel_compute_config(&intel_sdvo_connector->base,
@@ -1342,8 +1341,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo_connector->base.panel.fixed_mode))
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, fixed_mode))
return -EINVAL;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
@@ -1465,7 +1463,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
- const struct intel_sdvo_connector *intel_sdvo_connector =
+ struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
const struct drm_display_mode *mode = &crtc_state->hw.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
@@ -1496,11 +1494,14 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
return;
/* lvds has a special fixed output timing. */
- if (IS_LVDS(intel_sdvo_connector))
- intel_sdvo_get_dtd_from_mode(&output_dtd,
- intel_sdvo_connector->base.panel.fixed_mode);
- else
+ if (IS_LVDS(intel_sdvo_connector)) {
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(&intel_sdvo_connector->base, mode);
+
+ intel_sdvo_get_dtd_from_mode(&output_dtd, fixed_mode);
+ } else {
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ }
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
drm_info(&dev_priv->drm,
"Setting output timings on %s failed\n",
@@ -2291,33 +2292,12 @@ static int intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct drm_display_mode *newmode;
int num_modes = 0;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- /*
- * Fetch modes from VBT. For SDVO prefer the VBT mode since some
- * SDVO->LVDS transcoders can't cope with the EDID mode.
- */
- if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
- newmode = drm_mode_duplicate(connector->dev,
- dev_priv->vbt.sdvo_lvds_vbt_mode);
- if (newmode != NULL) {
- /* Guarantee the mode is preferred */
- newmode->type = (DRM_MODE_TYPE_PREFERRED |
- DRM_MODE_TYPE_DRIVER);
- drm_mode_probed_add(connector, newmode);
- num_modes++;
- }
- }
-
- /*
- * Attempt to get the mode list from DDC.
- * Assume that the preferred modes are
- * arranged in priority order.
- */
+ num_modes += intel_panel_get_modes(to_intel_connector(connector));
num_modes += intel_ddc_get_modes(connector, &intel_sdvo->ddc);
return num_modes;
@@ -2747,6 +2727,8 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
__drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
&conn_state->base.base);
+ INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
+
return sdvo_connector;
}
@@ -2890,7 +2872,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- struct drm_display_mode *mode;
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
@@ -2919,20 +2900,20 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
- intel_sdvo_get_lvds_modes(connector);
-
- list_for_each_entry(mode, &connector->probed_modes, head) {
- if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- struct drm_display_mode *fixed_mode =
- drm_mode_duplicate(connector->dev, mode);
+ /*
+ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+ * SDVO->LVDS transcoders can't cope with the EDID mode.
+ */
+ intel_panel_add_vbt_sdvo_fixed_mode(intel_connector);
- intel_panel_init(&intel_connector->panel,
- fixed_mode, NULL);
- break;
- }
+ if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+ intel_panel_add_edid_fixed_modes(intel_connector, false);
}
- if (!intel_connector->panel.fixed_mode)
+ intel_panel_init(intel_connector);
+
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
goto err;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 7e6245b97fed..0dd4775e8195 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -32,10 +32,14 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
if (!intel_phy_is_snps(i915, phy))
continue;
+ /*
+ * If calibration does not complete successfully, we'll remember
+ * which phy was affected and skip setup of the corresponding
+ * output later.
+ */
if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
- drm_err(&i915->drm, "SNPS PHY %c failed to calibrate after 25ms.\n",
- phy_name(phy));
+ i915->snps_phy_failed_calibration |= BIT(phy);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 2d71294aaceb..7c0df80612d0 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -30,6 +30,8 @@
* support.
*/
+#include <linux/string_helpers.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_color_mgmt.h>
@@ -96,13 +98,13 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
if (src_x % hsub || src_w % hsub) {
drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
- src_x, src_w, hsub, yesno(rotated));
+ src_x, src_w, hsub, str_yes_no(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
- src_y, src_h, vsub, yesno(rotated));
+ src_y, src_h, vsub, str_yes_no(rotated));
return -EINVAL;
}
@@ -430,9 +432,6 @@ vlv_sprite_update_noarm(struct intel_plane *plane,
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
plane_state->view.color_plane[0].mapping_stride);
@@ -440,8 +439,6 @@ vlv_sprite_update_noarm(struct intel_plane *plane,
SP_POS_Y(crtc_y) | SP_POS_X(crtc_x));
intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
SP_HEIGHT(crtc_h - 1) | SP_WIDTH(crtc_w - 1));
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -457,14 +454,11 @@ vlv_sprite_update_arm(struct intel_plane *plane,
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
u32 sprctl, linear_offset;
- unsigned long irqflags;
sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_sprite_update_csc(plane_state);
@@ -494,8 +488,6 @@ vlv_sprite_update_arm(struct intel_plane *plane,
vlv_sprite_update_clrc(plane_state);
vlv_sprite_update_gamma(plane_state);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -505,14 +497,9 @@ vlv_sprite_disable_arm(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static bool
@@ -862,15 +849,12 @@ ivb_sprite_update_noarm(struct intel_plane *plane,
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 sprscale = 0;
- unsigned long irqflags;
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE |
SPRITE_SRC_WIDTH(src_w - 1) |
SPRITE_SRC_HEIGHT(src_h - 1);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
plane_state->view.color_plane[0].mapping_stride);
intel_de_write_fw(dev_priv, SPRPOS(pipe),
@@ -879,8 +863,6 @@ ivb_sprite_update_noarm(struct intel_plane *plane,
SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1));
if (IS_IVYBRIDGE(dev_priv))
intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -895,14 +877,11 @@ ivb_sprite_update_arm(struct intel_plane *plane,
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
u32 sprctl, linear_offset;
- unsigned long irqflags;
sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
if (key->flags) {
intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
@@ -931,8 +910,6 @@ ivb_sprite_update_arm(struct intel_plane *plane,
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
ivb_sprite_update_gamma(plane_state);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -941,17 +918,12 @@ ivb_sprite_disable_arm(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SPRCTL(pipe), 0);
/* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0);
intel_de_write_fw(dev_priv, SPRSURF(pipe), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static bool
@@ -1204,15 +1176,12 @@ g4x_sprite_update_noarm(struct intel_plane *plane,
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 dvsscale = 0;
- unsigned long irqflags;
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE |
DVS_SRC_WIDTH(src_w - 1) |
DVS_SRC_HEIGHT(src_h - 1);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
plane_state->view.color_plane[0].mapping_stride);
intel_de_write_fw(dev_priv, DVSPOS(pipe),
@@ -1220,8 +1189,6 @@ g4x_sprite_update_noarm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, DVSSIZE(pipe),
DVS_HEIGHT(crtc_h - 1) | DVS_WIDTH(crtc_w - 1));
intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -1236,14 +1203,11 @@ g4x_sprite_update_arm(struct intel_plane *plane,
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
u32 dvscntr, linear_offset;
- unsigned long irqflags;
dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
if (key->flags) {
intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
@@ -1267,8 +1231,6 @@ g4x_sprite_update_arm(struct intel_plane *plane,
g4x_sprite_update_gamma(plane_state);
else
ilk_sprite_update_gamma(plane_state);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -1277,16 +1239,11 @@ g4x_sprite_disable_arm(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0);
/* Disable the scaler */
intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0);
intel_de_write_fw(dev_priv, DVSSURF(pipe), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index fc037c027ea5..b8b822ea3755 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display.h"
+#include "intel_display_power_map.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"
@@ -61,10 +62,12 @@ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
return POWER_DOMAIN_TC_COLD_OFF;
- return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+ return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
static intel_wakeref_t
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 8a39989b87ad..9379f3463344 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1145,8 +1145,8 @@ intel_tv_get_config(struct intel_encoder *encoder,
intel_tv_mode_to_mode(&mode, &tv_mode);
- drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
- drm_mode_debug_printmodeline(&mode);
+ drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&mode));
intel_tv_scale_mode_horiz(&mode, hdisplay,
xpos, mode.hdisplay - xsize - xpos);
@@ -1250,8 +1250,8 @@ intel_tv_compute_config(struct intel_encoder *encoder,
tv_conn_state->bypass_vfilter = false;
}
- drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
- drm_mode_debug_printmodeline(adjusted_mode);
+ drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(adjusted_mode));
/*
* The pipe scanline counter behaviour looks as follows when
@@ -1806,8 +1806,8 @@ intel_tv_get_modes(struct drm_connector *connector)
*/
intel_tv_mode_to_mode(mode, tv_mode);
if (count == 0) {
- drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
- drm_mode_debug_printmodeline(mode);
+ drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(mode));
}
intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
intel_tv_scale_mode_vert(mode, input->h, 0, 0);
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index b9397d9363c5..4b98bab3b890 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -289,6 +289,9 @@ struct bdb_general_features {
#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204 */
#define HDMI_MAX_DATA_RATE_297 1 /* 204 */
#define HDMI_MAX_DATA_RATE_165 2 /* 204 */
+#define HDMI_MAX_DATA_RATE_594 3 /* 249 */
+#define HDMI_MAX_DATA_RATE_340 4 /* 249 */
+#define HDMI_MAX_DATA_RATE_300 5 /* 249 */
#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
@@ -719,20 +722,22 @@ struct bdb_lvds_options {
/*
* Block 41 - LFP Data Table Pointers
*/
+struct lvds_lfp_data_ptr_table {
+ u16 offset; /* offsets are from start of bdb */
+ u8 table_size;
+} __packed;
/* LFP pointer table contains entries to the struct below */
struct lvds_lfp_data_ptr {
- u16 fp_timing_offset; /* offsets are from start of bdb */
- u8 fp_table_size;
- u16 dvo_timing_offset;
- u8 dvo_table_size;
- u16 panel_pnp_id_offset;
- u8 pnp_table_size;
+ struct lvds_lfp_data_ptr_table fp_timing;
+ struct lvds_lfp_data_ptr_table dvo_timing;
+ struct lvds_lfp_data_ptr_table panel_pnp_id;
} __packed;
struct bdb_lvds_lfp_data_ptrs {
- u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ u8 lvds_entries;
struct lvds_lfp_data_ptr ptr[16];
+ struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */
} __packed;
/*
@@ -764,6 +769,11 @@ struct lvds_pnp_id {
u8 mfg_year;
} __packed;
+/*
+ * For reference only. fp_timing has variable size so
+ * the data must be accessed using the data table pointers.
+ * Do not use this directly!
+ */
struct lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
@@ -774,6 +784,27 @@ struct bdb_lvds_lfp_data {
struct lvds_lfp_data_entry data[16];
} __packed;
+struct lvds_lfp_panel_name {
+ u8 name[13];
+} __packed;
+
+struct lvds_lfp_black_border {
+ u8 top; /* 227 */
+ u8 bottom; /* 227 */
+ u8 left; /* 238 */
+ u8 right; /* 238 */
+} __packed;
+
+struct bdb_lvds_lfp_data_tail {
+ struct lvds_lfp_panel_name panel_name[16]; /* 156-163? */
+ u16 scaling_enable; /* 187 */
+ u8 seamless_drrs_min_refresh_rate[16]; /* 188 */
+ u8 pixel_overlap_count[16]; /* 208 */
+ struct lvds_lfp_black_border black_border[16]; /* 227 */
+ u16 dual_lfp_port_sync_enable; /* 231 */
+ u16 gpu_dithering_for_banding_artifacts; /* 245 */
+} __packed;
+
/*
* Block 43 - LFP Backlight Control Data Block
*/
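
The "for reference only" warning above exists because fp_timing is variable-sized, so struct lvds_lfp_data_entry cannot be indexed directly; each sub-table has to be located through the block 41 pointer table. A standalone sketch of that offset-based lookup (illustrative, with userspace types standing in for the kernel's):

#include <stdint.h>

struct lvds_lfp_data_ptr_table {
	uint16_t offset;	/* from start of BDB */
	uint8_t table_size;
} __attribute__((packed));

static const void *lfp_table(const void *bdb,
			     const struct lvds_lfp_data_ptr_table *t)
{
	/* resolve a sub-table by its BDB-relative offset */
	return (const uint8_t *)bdb + t->offset;
}

int main(void)
{
	uint8_t bdb[64] = { 0 };	/* fake BDB blob for illustration */
	struct lvds_lfp_data_ptr_table fp = { .offset = 32, .table_size = 46 };
	const void *fp_timing = lfp_table(bdb, &fp);

	(void)fp_timing; /* t->table_size bounds how much may be read */
	return 0;
}

The fp_timing entry for panel type i would then be lfp_table(bdb, &ptrs->ptr[i].fp_timing), never a fixed struct offset.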
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 545eff5bf158..43e1bbc1e303 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -7,6 +7,8 @@
*/
#include <linux/limits.h>
+#include <drm/display/drm_dsc_helper.h>
+
#include "i915_drv.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -378,10 +380,18 @@ calculate_rc_params(struct rc_parameters *rc,
{
int bpc = vdsc_cfg->bits_per_component;
int bpp = vdsc_cfg->bits_per_pixel >> 4;
- int ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 };
- int ofs_und8[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 };
- int ofs_und12[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 };
- int ofs_und15[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 };
+ static const s8 ofs_und6[] = {
+ 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+ };
+ static const s8 ofs_und8[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+ };
+ static const s8 ofs_und12[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+ };
+ static const s8 ofs_und15[] = {
+ 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
+ };
int qp_bpc_modifier = (bpc - 8) * 2;
u32 res, buf_i, bpp_i;
@@ -579,7 +589,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
int i = 0;
- if (crtc_state->bigjoiner)
+ if (crtc_state->bigjoiner_pipes)
num_vdsc_instances *= 2;
/* Populate PICTURE_PARAMETER_SET_0 registers */
@@ -1113,7 +1123,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dss_ctl1_val = 0;
- if (crtc_state->bigjoiner && !crtc_state->dsc.compression_enable) {
+ if (crtc_state->bigjoiner_pipes && !crtc_state->dsc.compression_enable) {
if (intel_crtc_is_bigjoiner_slave(crtc_state))
dss_ctl1_val |= UNCOMPRESSED_JOINER_SLAVE;
else
@@ -1140,7 +1150,7 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
- if (crtc_state->bigjoiner) {
+ if (crtc_state->bigjoiner_pipes) {
dss_ctl1_val |= BIG_JOINER_ENABLE;
if (!intel_crtc_is_bigjoiner_slave(crtc_state))
dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE;
@@ -1156,7 +1166,7 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
/* Disable only if either of them is enabled */
if (old_crtc_state->dsc.compression_enable ||
- old_crtc_state->bigjoiner) {
+ old_crtc_state->bigjoiner_pipes) {
intel_de_write(dev_priv, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0);
intel_de_write(dev_priv, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0);
}
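
The calculate_rc_params() hunk above swaps function-scope int arrays for static const s8 tables. A small sketch of the difference (illustrative): the static const form is emitted once into read-only data instead of being rebuilt on the stack at every call, and s8 is wide enough since every offset fits in [-12, 10].

#include <stdint.h>

int8_t rc_offset_und6(unsigned int bpp_i)
{
	/* one copy in .rodata, no per-call stack initialisation */
	static const int8_t ofs_und6[] = {
		0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
	};

	return bpp_i < sizeof(ofs_und6) ? ofs_und6[bpp_i] : ofs_und6[14];
}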
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 139e8936edc5..396f2f994fa0 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -69,9 +69,9 @@ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_stat
/* The hw imposes the extra scanline before frame start */
if (DISPLAY_VER(i915) >= 13)
- return crtc_state->vrr.guardband + i915->framestart_delay + 1;
+ return crtc_state->vrr.guardband + crtc_state->framestart_delay + 1;
else
- return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
+ return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
}
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index c2e94118566b..4092679be21e 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -197,7 +197,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
return skl_update_scaler(crtc_state, !crtc_state->hw.active,
SKL_CRTC_INDEX,
&crtc_state->scaler_state.scaler_id,
- crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ drm_rect_width(&crtc_state->pipe_src),
+ drm_rect_height(&crtc_state->pipe_src),
width, height, NULL, 0,
crtc_state->pch_pfit.enabled);
}
@@ -400,10 +401,6 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- struct drm_rect src = {
- .x2 = crtc_state->pipe_src_w << 16,
- .y2 = crtc_state->pipe_src_h << 16,
- };
const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
u16 uv_rgb_hphase, uv_rgb_vphase;
enum pipe pipe = crtc->pipe;
@@ -412,7 +409,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
int x = dst->x1;
int y = dst->y1;
int hscale, vscale;
- unsigned long irqflags;
+ struct drm_rect src;
int id;
u32 ps_ctrl;
@@ -423,6 +420,10 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
crtc_state->scaler_state.scaler_id < 0))
return;
+ drm_rect_init(&src, 0, 0,
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
+
hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
@@ -434,8 +435,6 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
skl_scaler_setup_filter(dev_priv, pipe, id, 0,
crtc_state->hw.scaling_filter);
@@ -449,8 +448,6 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
x << 16 | y);
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
width << 16 | height);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
void
@@ -519,15 +516,10 @@ static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0);
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0);
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 1223075595ff..caa03324a733 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -615,9 +615,20 @@ skl_plane_disable_arm(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ skl_write_plane_wm(plane, crtc_state);
+
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
+}
+
+static void
+icl_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
if (icl_is_hdr_plane(dev_priv, plane_id))
intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
@@ -627,8 +638,6 @@ skl_plane_disable_arm(struct intel_plane *plane,
intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static bool
@@ -762,6 +771,18 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
return PLANE_CTL_TILED_X;
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
+ case I915_FORMAT_MOD_4_TILED:
+ return PLANE_CTL_TILED_4;
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ return PLANE_CTL_TILED_4 |
+ PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ return PLANE_CTL_TILED_4 |
+ PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
@@ -1065,7 +1086,7 @@ static void icl_plane_csc_load_black(struct intel_plane *plane)
intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
}
-static int skl_plane_color_plane(const struct intel_plane_state *plane_state)
+static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
{
/* Program the UV plane on planar master */
if (plane_state->planar_linked_plane && !plane_state->planar_slave)
@@ -1082,14 +1103,11 @@ skl_plane_update_noarm(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- int color_plane = skl_plane_color_plane(plane_state);
- u32 stride = skl_plane_stride(plane_state, color_plane);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
+ u32 stride = skl_plane_stride(plane_state, 0);
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- unsigned long irqflags;
/* The scaler will handle the output position */
if (plane_state->scaler_id >= 0) {
@@ -1097,14 +1115,99 @@ skl_plane_update_noarm(struct intel_plane *plane,
crtc_y = 0;
}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id),
+ PLANE_STRIDE_(stride));
+ intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+ PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
+ intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+ PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
+
+ skl_write_plane_wm(plane, crtc_state);
+}
+
+static void
+skl_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
+ u32 plane_ctl, plane_color_ctl = 0;
+
+ plane_ctl = plane_state->ctl |
+ skl_plane_ctl_crtc(crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
+
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
+
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
+ skl_plane_aux_dist(plane_state, 0));
+
+ intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) |
+ PLANE_OFFSET_X(plane_state->view.color_plane[1].x));
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
/*
- * FIXME: pxp session invalidation can hit any time even at time of commit
- * or after the commit, display content will be garbage.
+ * Enable the scaler before the plane so that we don't
+ * get a catastrophic underrun even if the two operations
+ * end up happening in two different frames.
+ *
+ * TODO: split into noarm+arm pair
*/
- if (plane_state->force_black)
- icl_plane_csc_load_black(plane);
+ if (plane_state->scaler_id >= 0)
+ skl_program_plane_scaler(plane, crtc_state, plane_state);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ skl_plane_surf(plane_state, 0));
+}
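Note: the plane update path is split into a noarm and an arm step: registers that are latched until the surface address is written go in noarm, while the self-arming PLANE_CTL/PLANE_SURF pair is written last, back to back. A hedged sketch of that contract; write_reg() and the EX_* offsets are placeholders, not i915 API:

    #include <linux/types.h>

    /* Hypothetical MMIO helper and register offsets, illustration only. */
    static void write_reg(u32 reg, u32 val) { (void)reg; (void)val; }

    #define EX_PLANE_STRIDE 0x0
    #define EX_PLANE_CTL    0x4
    #define EX_PLANE_SURF   0x8

    static void example_update_noarm(u32 stride)
    {
            /* Latched by the later surface write: safe to program early. */
            write_reg(EX_PLANE_STRIDE, stride);
    }

    static void example_update_arm(u32 ctl, u32 surf)
    {
            /*
             * The control register self-arms if the plane was previously
             * disabled, so write it immediately before the surface
             * register to keep the enable as close to atomic as possible;
             * the surface write arms the whole update.
             */
            write_reg(EX_PLANE_CTL, ctl);
            write_reg(EX_PLANE_SURF, surf);
    }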
+
+static void
+icl_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ int color_plane = icl_plane_color_plane(plane_state);
+ u32 stride = skl_plane_stride(plane_state, color_plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int x = plane_state->view.color_plane[color_plane].x;
+ int y = plane_state->view.color_plane[color_plane].y;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ u32 plane_color_ctl;
+
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
+ /* The scaler will handle the output position */
+ if (plane_state->scaler_id >= 0) {
+ crtc_x = 0;
+ crtc_y = 0;
+ }
intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id),
PLANE_STRIDE_(stride));
@@ -1113,6 +1216,13 @@ skl_plane_update_noarm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
+
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
+
if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) {
intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0),
lower_32_bits(plane_state->ccval));
@@ -1120,60 +1230,45 @@ skl_plane_update_noarm(struct intel_plane *plane,
upper_32_bits(plane_state->ccval));
}
+ /* Flat CCS has no AUX surface, so AUX_DIST need not be programmed */
+ if (!HAS_FLAT_CCS(dev_priv))
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
+ skl_plane_aux_dist(plane_state, color_plane));
+
if (icl_is_hdr_plane(dev_priv, plane_id))
intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
plane_state->cus_ctl);
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ /*
+ * FIXME: PXP session invalidation can hit at any time, even during or
+ * after the commit; the display content will then be garbage.
+ */
+ if (plane_state->force_black)
+ icl_plane_csc_load_black(plane);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
}
static void
-skl_plane_update_arm(struct intel_plane *plane,
+icl_plane_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- int color_plane = skl_plane_color_plane(plane_state);
- u32 x = plane_state->view.color_plane[color_plane].x;
- u32 y = plane_state->view.color_plane[color_plane].y;
- u32 plane_color_ctl = 0;
- u32 plane_ctl = plane_state->ctl;
- unsigned long irqflags;
-
- plane_ctl |= skl_plane_ctl_crtc(crtc_state);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- plane_color_ctl = plane_state->color_ctl |
- glk_plane_color_ctl_crtc(crtc_state);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
- intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
- intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
-
- intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
- PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
-
- intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
- skl_plane_aux_dist(plane_state, color_plane));
-
- if (DISPLAY_VER(dev_priv) < 11)
- intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
- PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) |
- PLANE_OFFSET_X(plane_state->view.color_plane[1].x));
+ int color_plane = icl_plane_color_plane(plane_state);
+ u32 plane_ctl;
- if (DISPLAY_VER(dev_priv) >= 10)
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+ plane_ctl = plane_state->ctl |
+ skl_plane_ctl_crtc(crtc_state);
/*
* Enable the scaler before the plane so that we don't
@@ -1193,8 +1288,6 @@ skl_plane_update_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
skl_plane_surf(plane_state, color_plane));
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -1204,7 +1297,6 @@ skl_plane_async_flip(struct intel_plane *plane,
bool async_flip)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- unsigned long irqflags;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
u32 plane_ctl = plane_state->ctl;
@@ -1214,13 +1306,9 @@ skl_plane_async_flip(struct intel_plane *plane,
if (async_flip)
plane_ctl |= PLANE_CTL_ASYNC_FLIP;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
skl_plane_surf(plane_state, 0));
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static bool intel_format_is_p01x(u32 format)
@@ -1325,7 +1413,7 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
to_i915(plane_state->uapi.plane->dev);
int crtc_x = plane_state->uapi.dst.x1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
- int pipe_src_w = crtc_state->pipe_src_w;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
/*
* Display WA #1175: glk
@@ -1545,9 +1633,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
/*
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
- * they match with the main surface x/y offsets.
+ * they match with the main surface x/y offsets. On DG2 there is
+ * no AUX plane in the fb, so skip this check there.
*/
- if (intel_fb_is_ccs_modifier(fb->modifier)) {
+ if (intel_fb_is_ccs_modifier(fb->modifier) && aux_plane) {
while (!skl_check_main_ccs_coordinates(plane_state, x, y,
offset, aux_plane)) {
if (offset == 0)
@@ -1591,6 +1680,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
int uv_plane = 1;
+ int ccs_plane = intel_fb_is_ccs_modifier(fb->modifier) ?
+ skl_main_to_aux_plane(fb, uv_plane) : 0;
int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
int x = plane_state->uapi.src.x1 >> 17;
@@ -1611,8 +1702,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
offset = intel_plane_compute_aligned_offset(&x, &y,
plane_state, uv_plane);
- if (intel_fb_is_ccs_modifier(fb->modifier)) {
- int ccs_plane = main_to_ccs_plane(fb, uv_plane);
+ if (ccs_plane) {
u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
u32 alignment = intel_surf_alignment(fb, uv_plane);
@@ -2011,9 +2101,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
- if (modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED)
+ if (!intel_fb_is_ccs_modifier(modifier))
return true;
fallthrough;
default:
@@ -2094,6 +2182,10 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
return false;
+ /* Wa_14013215631 */
+ if (IS_DG2_DISPLAY_STEP(i915, STEP_A0, STEP_C0))
+ return false;
+
return plane_id < PLANE_SPRITE4;
}
@@ -2106,6 +2198,8 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
caps |= INTEL_PLANE_CAP_TILING_Y;
if (DISPLAY_VER(i915) < 12)
caps |= INTEL_PLANE_CAP_TILING_Yf;
+ if (HAS_4TILE(i915))
+ caps |= INTEL_PLANE_CAP_TILING_4;
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
@@ -2162,9 +2256,15 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
}
plane->max_stride = skl_plane_max_stride;
- plane->update_noarm = skl_plane_update_noarm;
- plane->update_arm = skl_plane_update_arm;
- plane->disable_arm = skl_plane_disable_arm;
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ plane->update_noarm = icl_plane_update_noarm;
+ plane->update_arm = icl_plane_update_arm;
+ plane->disable_arm = icl_plane_disable_arm;
+ } else {
+ plane->update_noarm = skl_plane_update_noarm;
+ plane->update_arm = skl_plane_update_arm;
+ plane->disable_arm = skl_plane_disable_arm;
+ }
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
@@ -2278,13 +2378,14 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
+ static_assert(PLANE_CTL_TILED_YF == PLANE_CTL_TILED_4);
if (!plane->get_hw_state(plane, &pipe))
return;
drm_WARN_ON(dev, pipe != crtc->pipe);
- if (crtc_state->bigjoiner) {
+ if (crtc_state->bigjoiner_pipes) {
drm_dbg_kms(&dev_priv->drm,
"Unsupported bigjoiner configuration for initial FB\n");
return;
@@ -2332,19 +2433,34 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
case PLANE_CTL_TILED_Y:
plane_config->tiling = I915_TILING_Y;
if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = DISPLAY_VER(dev_priv) >= 12 ?
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
- I915_FORMAT_MOD_Y_TILED_CCS;
+ if (DISPLAY_VER(dev_priv) >= 12)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
- case PLANE_CTL_TILED_YF:
- if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
- else
- fb->modifier = I915_FORMAT_MOD_Yf_TILED;
+ case PLANE_CTL_TILED_YF: /* aka PLANE_CTL_TILED_4 on XE_LPD+ */
+ if (HAS_4TILE(dev_priv)) {
+ u32 rc_mask = PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+
+ if ((val & rc_mask) == rc_mask)
+ fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS;
+ else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_MC_CCS;
+ else if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
+ else
+ fb->modifier = I915_FORMAT_MOD_4_TILED;
+ } else {
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
+ }
break;
default:
MISSING_CASE(tiling);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 0d936f658b3f..1954f07f0d3e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1660,6 +1660,8 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(connector);
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
@@ -1673,8 +1675,8 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
drm_connector_set_panel_orientation_with_quirk(&connector->base,
intel_dsi_get_panel_orientation(connector),
- connector->panel.fixed_mode->hdisplay,
- connector->panel.fixed_mode->vdisplay);
+ fixed_mode->hdisplay,
+ fixed_mode->vdisplay);
}
#define NS_KHZ_RATIO 1000000
@@ -1857,7 +1859,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *current_mode, *fixed_mode;
+ struct drm_display_mode *current_mode;
enum port port;
enum pipe pipe;
@@ -1978,15 +1980,16 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
mutex_lock(&dev->mode_config.mutex);
- fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
- if (!fixed_mode) {
+ if (!intel_panel_preferred_fixed_mode(intel_connector)) {
drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
- intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_init(intel_connector);
+
intel_backlight_setup(intel_connector, INVALID_PIPE);
vlv_dsi_add_properties(intel_connector);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index df880f44700a..5894b0138343 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -26,6 +26,7 @@
*/
#include <linux/kernel.h>
+#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "intel_de.h"
@@ -393,10 +394,7 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
/* Calculate TXESC2 divider */
div2_value = DIV_ROUND_UP(div1_value, txesc1_div);
- if (div2_value < 10)
- txesc2_div = div2_value;
- else
- txesc2_div = 10;
+ txesc2_div = min_t(u32, div2_value, 10);
intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
(1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
@@ -581,7 +579,7 @@ static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
+ str_on_off(state), str_on_off(cur_state));
}
void assert_dsi_pll_enabled(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 470fdfd61a0f..ddda468241ef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -138,21 +138,21 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !dma_resv_test_signaled(obj->resv, true);
+ * !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
args->busy = 0;
- dma_resv_iter_begin(&cursor, obj->base.resv, true);
+ dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor))
args->busy = 0;
- if (dma_resv_iter_is_exclusive(&cursor))
- /* Translate the exclusive fence to the READ *and* WRITE engine */
+ if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE)
+ /* Translate the write fences to the READ *and* WRITE engine */
args->busy |= busy_check_writer(fence);
else
- /* Translate shared fences to READ set of engines */
+ /* Translate read fences to READ set of engines */
args->busy |= busy_check_reader(fence);
}
dma_resv_iter_end(&cursor);
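Note: the busy ioctl now walks the reservation object through the dma_resv usage API: DMA_RESV_USAGE_READ selects both read and write fences, and dma_resv_iter_usage() distinguishes the two. A minimal sketch of the pattern, kernel context assumed:

    #include <linux/dma-fence.h>
    #include <linux/dma-resv.h>

    /* True if any writer (usage <= WRITE) has not signaled yet. */
    static bool example_has_active_writer(struct dma_resv *resv)
    {
            struct dma_resv_iter cursor;
            struct dma_fence *fence;
            bool busy = false;

            dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
            dma_resv_for_each_fence_unlocked(&cursor, fence) {
                    /* Usages are ordered KERNEL < WRITE < READ < BOOKKEEP. */
                    if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE &&
                        !dma_fence_is_signaled(fence))
                            busy = true;
            }
            dma_resv_iter_end(&cursor);

            return busy;
    }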
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index ce91b23385cf..0512afdd20d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -108,14 +108,16 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
trace_i915_gem_object_clflush(obj);
clflush = NULL;
- if (!(flags & I915_CLFLUSH_SYNC))
+ if (!(flags & I915_CLFLUSH_SYNC) &&
+ dma_resv_reserve_fences(obj->base.resv, 1) == 0)
clflush = clflush_work_create(obj);
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,
obj->base.resv, NULL, true,
i915_fence_timeout(i915),
I915_FENCE_GFP);
- dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+ dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
+ DMA_RESV_USAGE_KERNEL);
dma_fence_work_commit(&clflush->base);
/*
* We must have successfully populated the pages (since we are
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 9ae294eb7fb4..ab4c5ab28e4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -64,6 +64,7 @@
*
*/
+#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/nospec.h>
@@ -1030,23 +1031,44 @@ static void free_engines_rcu(struct rcu_head *rcu)
free_engines(engines);
}
+static void accumulate_runtime(struct i915_drm_client *client,
+ struct i915_gem_engines *engines)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ if (!client)
+ return;
+
+ /* Transfer accumulated runtime to the parent GEM context. */
+ for_each_gem_engine(ce, engines, it) {
+ unsigned int class = ce->engine->uabi_class;
+
+ GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
+ atomic64_add(intel_context_get_total_runtime_ns(ce),
+ &client->past_runtime[class]);
+ }
+}
+
static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_gem_engines *engines =
container_of(fence, typeof(*engines), fence);
+ struct i915_gem_context *ctx = engines->ctx;
switch (state) {
case FENCE_COMPLETE:
if (!list_empty(&engines->link)) {
- struct i915_gem_context *ctx = engines->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->stale.lock, flags);
list_del(&engines->link);
spin_unlock_irqrestore(&ctx->stale.lock, flags);
}
- i915_gem_context_put(engines->ctx);
+ accumulate_runtime(ctx->client, engines);
+ i915_gem_context_put(ctx);
+
break;
case FENCE_FREE:
@@ -1256,6 +1278,9 @@ static void i915_gem_context_release_work(struct work_struct *work)
if (ctx->pxp_wakeref)
intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
+ if (ctx->client)
+ i915_drm_client_put(ctx->client);
+
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -1466,7 +1491,7 @@ static void set_closed_name(struct i915_gem_context *ctx)
static void context_close(struct i915_gem_context *ctx)
{
- struct i915_address_space *vm;
+ struct i915_drm_client *client;
/* Flush any concurrent set_engines() */
mutex_lock(&ctx->engines_mutex);
@@ -1479,19 +1504,6 @@ static void context_close(struct i915_gem_context *ctx)
set_closed_name(ctx);
- vm = ctx->vm;
- if (vm) {
- /* i915_vm_close drops the final reference, which is a bit too
- * early and could result in surprises with concurrent
- * operations racing with thist ctx close. Keep a full reference
- * until the end.
- */
- i915_vm_get(vm);
- i915_vm_close(vm);
- }
-
- ctx->file_priv = ERR_PTR(-EBADF);
-
/*
* The LUT uses the VMA as a backpointer to unref the object,
* so we need to clear the LUT before we close all the VMA (inside
@@ -1499,10 +1511,19 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
+ ctx->file_priv = ERR_PTR(-EBADF);
+
spin_lock(&ctx->i915->gem.contexts.lock);
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
+ client = ctx->client;
+ if (client) {
+ spin_lock(&client->ctx_lock);
+ list_del_rcu(&ctx->client_link);
+ spin_unlock(&client->ctx_lock);
+ }
+
mutex_unlock(&ctx->mutex);
/*
@@ -1597,12 +1618,8 @@ i915_gem_create_context(struct drm_i915_private *i915,
}
vm = &ppgtt->vm;
}
- if (vm) {
- ctx->vm = i915_vm_open(vm);
-
- /* i915_vm_open() takes a reference */
- i915_vm_put(vm);
- }
+ if (vm)
+ ctx->vm = vm;
mutex_init(&ctx->engines_mutex);
if (pc->num_user_engines >= 0) {
@@ -1652,7 +1669,7 @@ err_engines:
free_engines(e);
err_vm:
if (ctx->vm)
- i915_vm_close(ctx->vm);
+ i915_vm_put(ctx->vm);
err_ctx:
kfree(ctx);
return ERR_PTR(err);
@@ -1679,6 +1696,8 @@ static void gem_context_register(struct i915_gem_context *ctx,
ctx->file_priv = fpriv;
ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->client = i915_drm_client_get(fpriv->client);
+
snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
current->comm, pid_nr(ctx->pid));
@@ -1686,6 +1705,10 @@ static void gem_context_register(struct i915_gem_context *ctx,
old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
WARN_ON(old);
+ spin_lock(&ctx->client->ctx_lock);
+ list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
+ spin_unlock(&ctx->client->ctx_lock);
+
spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);
@@ -1836,7 +1859,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (err)
return err;
- i915_vm_open(vm);
+ i915_vm_get(vm);
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->value = id;
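Note: when a context's engine set is finally released, its busyness is folded into the owning client so per-client fdinfo statistics survive context destruction. A simplified sketch of the accumulation; the struct below is illustrative, not the real i915_drm_client layout:

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define EX_ENGINE_CLASSES 5 /* illustrative number of uabi classes */

    struct example_client {
            atomic64_t past_runtime[EX_ENGINE_CLASSES];
    };

    /* Fold a dying context's runtime into its client, per engine class. */
    static void example_accumulate(struct example_client *client,
                                   unsigned int class, u64 runtime_ns)
    {
            if (!client || class >= EX_ENGINE_CLASSES)
                    return;

            atomic64_add(runtime_ns, &client->past_runtime[class]);
    }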
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 282cdb8a5c5a..cb78214a7dcd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -293,6 +293,12 @@ struct i915_gem_context {
/** @link: place with &drm_i915_private.context_list */
struct list_head link;
+ /** @client: struct i915_drm_client */
+ struct i915_drm_client *client;
+
+ /** @client_link: for linking onto &i915_drm_client.ctx_list */
+ struct list_head client_link;
+
/**
* @ref: reference count
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index c6eb023d3d86..5802692ea604 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -123,7 +123,7 @@ __i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
*/
flags = I915_BO_ALLOC_USER;
- ret = mr->ops->init_object(mr, obj, size, 0, flags);
+ ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
if (ret)
goto object_free;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 13917231ae81..f5062d0c6333 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -66,15 +66,6 @@ err:
return ERR_PTR(ret);
}
-static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg,
- enum dma_data_direction dir)
-{
- dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
- sg_free_table(sg);
- kfree(sg);
-}
-
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
struct iosys_map *map)
{
@@ -102,11 +93,15 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf,
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
int ret;
if (obj->base.size < vma->vm_end - vma->vm_start)
return -EINVAL;
+ if (HAS_LMEM(i915))
+ return drm_gem_prime_mmap(&obj->base, vma);
+
if (!obj->base.filp)
return -ENODEV;
@@ -209,7 +204,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.attach = i915_gem_dmabuf_attach,
.detach = i915_gem_dmabuf_detach,
.map_dma_buf = i915_gem_map_dma_buf,
- .unmap_dma_buf = i915_gem_unmap_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 6ca8929cf6e1..c326bd2b444f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -4,8 +4,9 @@
* Copyright © 2008,2010 Intel Corporation
*/
-#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
+#include <linux/highmem.h>
+#include <linux/intel-iommu.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -998,11 +999,9 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
}
}
- if (!(ev->flags & EXEC_OBJECT_WRITE)) {
- err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
- if (err)
- return err;
- }
+ err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+ if (err)
+ return err;
GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
@@ -1320,10 +1319,8 @@ static void *reloc_vaddr(struct i915_vma *vma,
static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
- if (flushes & CLFLUSH_BEFORE) {
- clflushopt(addr);
- mb();
- }
+ if (flushes & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(addr, sizeof(*addr));
*addr = value;
@@ -1335,7 +1332,7 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
* to ensure ordering of clflush wrt to the system.
*/
if (flushes & CLFLUSH_AFTER)
- clflushopt(addr);
+ drm_clflush_virt_range(addr, sizeof(*addr));
} else
*addr = value;
}
@@ -2301,7 +2298,7 @@ static int eb_parse(struct i915_execbuffer *eb)
if (IS_ERR(batch))
return PTR_ERR(batch);
- err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
+ err = dma_resv_reserve_fences(shadow->obj->base.resv, 1);
if (err)
return err;
@@ -2689,6 +2686,11 @@ eb_select_engine(struct i915_execbuffer *eb)
if (err)
goto err;
+ if (!i915_vm_tryget(ce->vm)) {
+ err = -ENOENT;
+ goto err;
+ }
+
eb->context = ce;
eb->gt = ce->engine->gt;
@@ -2712,6 +2714,7 @@ eb_put_engine(struct i915_execbuffer *eb)
{
struct intel_context *child;
+ i915_vm_put(eb->context->vm);
intel_gt_pm_put(eb->gt);
for_each_child(eb->context, child)
intel_context_put(child);
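Note: clflush_write32() now uses drm_clflush_virt_range(), which selects the best available flush instruction (e.g. clflushopt) and issues the needed memory barriers itself. In the real code the two flushes are conditional on the CLFLUSH_BEFORE/CLFLUSH_AFTER flags; a minimal unconditional sketch:

    #include <drm/drm_cache.h>
    #include <linux/types.h>

    /* Flush a single dword around a CPU write to non-coherent memory. */
    static void example_flushed_write(u32 *addr, u32 value)
    {
            drm_clflush_virt_range(addr, sizeof(*addr)); /* before write */
            *addr = value;
            drm_clflush_virt_range(addr, sizeof(*addr)); /* after write */
    }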
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 444f8268b9c5..8949fb0a944f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <uapi/drm/i915_drm.h>
+
#include "intel_memory_region.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
@@ -66,7 +68,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
+ GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
i915_gem_object_evictable(obj));
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
@@ -100,7 +102,7 @@ __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
resource_size_t page_size,
unsigned int flags)
{
- return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
size, page_size, flags);
}
@@ -135,6 +137,6 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags)
{
- return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
size, 0, flags);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 372bc220faeb..06b1b188ce5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <drm/drm_cache.h>
@@ -605,6 +606,9 @@ bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
if (!mr)
return false;
+ if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
+ return false;
+
if (obj->mm.region == mr)
return true;
@@ -741,30 +745,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
/**
* i915_gem_object_get_moving_fence - Get the object's moving fence if any
* @obj: The object whose moving fence to get.
+ * @fence: The resulting fence
*
* A non-signaled moving fence means that there is an async operation
* pending on the object that needs to be waited on before setting up
* any GPU- or CPU PTEs to the object's pages.
*
- * Return: A refcounted pointer to the object's moving fence if any,
- * NULL otherwise.
+ * Return: Negative error code or 0 for success.
*/
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
-{
- return dma_fence_get(i915_gem_to_ttm(obj)->moving);
-}
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence)
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence **fence)
{
- struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
-
- if (*moving == fence)
- return;
-
- dma_fence_put(*moving);
- *moving = dma_fence_get(fence);
+ return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+ fence);
}
/**
@@ -782,23 +775,16 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr)
{
- struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
- int ret;
+ long ret;
assert_object_held(obj);
- if (!fence)
- return 0;
- ret = dma_fence_wait(fence, intr);
- if (ret)
- return ret;
+ ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+ intr, MAX_SCHEDULE_TIMEOUT);
+ if (!ret)
+ ret = -ETIME;
- if (fence->error)
- return fence->error;
-
- i915_gem_to_ttm(obj)->moving = NULL;
- dma_fence_put(fence);
- return 0;
+ return ret < 0 ? ret : 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
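Note: the per-BO moving fence is gone; pending async moves and clears are now tracked as DMA_RESV_USAGE_KERNEL fences on the object's reservation object, so both helpers above reduce to generic dma_resv calls. A minimal sketch, kernel context assumed:

    #include <linux/dma-resv.h>
    #include <linux/sched.h>

    /* Collapse all pending kernel (move/clear) work into one fence. */
    static int example_get_moving_fence(struct dma_resv *resv,
                                        struct dma_fence **fence)
    {
            return dma_resv_get_singleton(resv, DMA_RESV_USAGE_KERNEL, fence);
    }

    /* Block until all kernel-usage fences have signaled. */
    static int example_wait_moving_fence(struct dma_resv *resv, bool intr)
    {
            long ret;

            ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
                                        intr, MAX_SCHEDULE_TIMEOUT);
            if (!ret)
                    ret = -ETIME;

            return ret < 0 ? ret : 0;
    }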
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 02c37fe4a535..e11d82a9f7c3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence);
-
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index fd54eb8f4826..2c88bdb8ff7c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -631,6 +631,8 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
+ resource_size_t bo_offset;
+
unsigned long scratch;
u64 encode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ca6faffcc496..0d0e46dae559 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -14,6 +14,7 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
+#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 6cf94469d5a8..f46ee16a323a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <uapi/drm/i915_drm.h>
+
#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
@@ -27,11 +29,12 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
mutex_unlock(&mem->objects.lock);
}
-struct drm_i915_gem_object *
-i915_gem_object_create_region(struct intel_memory_region *mem,
- resource_size_t size,
- resource_size_t page_size,
- unsigned int flags)
+static struct drm_i915_gem_object *
+__i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size,
+ resource_size_t page_size,
+ unsigned int flags)
{
struct drm_i915_gem_object *obj;
resource_size_t default_page_size;
@@ -62,6 +65,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
size = round_up(size, default_page_size);
+ if (default_page_size == size)
+ flags |= I915_BO_ALLOC_CONTIGUOUS;
+
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
@@ -83,7 +89,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
if (default_page_size < mem->min_page_size)
flags |= I915_BO_ALLOC_PM_EARLY;
- err = mem->ops->init_object(mem, obj, size, page_size, flags);
+ err = mem->ops->init_object(mem, obj, offset, size, page_size, flags);
if (err)
goto err_object_free;
@@ -95,6 +101,40 @@ err_object_free:
return ERR_PTR(err);
}
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ resource_size_t page_size,
+ unsigned int flags)
+{
+ return __i915_gem_object_create_region(mem, I915_BO_INVALID_OFFSET,
+ size, page_size, flags);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region_at(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size,
+ unsigned int flags)
+{
+ GEM_BUG_ON(offset == I915_BO_INVALID_OFFSET);
+
+ if (GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
+ GEM_WARN_ON(!IS_ALIGNED(offset, mem->min_page_size)))
+ return ERR_PTR(-EINVAL);
+
+ if (range_overflows(offset, size, resource_size(&mem->region)))
+ return ERR_PTR(-EINVAL);
+
+ if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
+ offset + size > mem->io_size &&
+ !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
+ return ERR_PTR(-ENOSPC);
+
+ return __i915_gem_object_create_region(mem, offset, size, 0,
+ flags | I915_BO_ALLOC_CONTIGUOUS);
+}
+
/**
* i915_gem_process_region - Iterate over all objects of a region using ops
* to process and optionally skip objects
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index fcaa12d657d4..2dfcc41c0170 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -14,6 +14,8 @@ struct sg_table;
struct i915_gem_apply_to_region;
+#define I915_BO_INVALID_OFFSET ((resource_size_t)-1)
+
/**
* struct i915_gem_apply_to_region_ops - ops to use when iterating over all
* region objects.
@@ -56,6 +58,11 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t size,
resource_size_t page_size,
unsigned int flags);
+struct drm_i915_gem_object *
+i915_gem_object_create_region_at(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size,
+ unsigned int flags);
int i915_gem_process_region(struct intel_memory_region *mr,
struct i915_gem_apply_to_region *apply);
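Note: i915_gem_object_create_region_at() places an object at a fixed offset inside a memory region (used below for preallocated stolen objects such as an inherited framebuffer); the offset and size must be min_page_size aligned, and the result is implicitly contiguous. A hedged usage sketch, assuming a valid struct intel_memory_region *mem:

    #include <linux/sizes.h>

    #include "gem/i915_gem_region.h"

    /* Reserve 8 MiB at the very start of a region, e.g. for a BIOS FB. */
    static struct drm_i915_gem_object *
    example_create_at_start(struct intel_memory_region *mem)
    {
            return i915_gem_object_create_region_at(mem, 0, SZ_8M, 0);
    }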
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index e92cc9d7257c..2e16e91a5a56 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -12,8 +12,9 @@
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
-#include "i915_gemfs.h"
#include "i915_gem_object.h"
+#include "i915_gem_tiling.h"
+#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
@@ -551,6 +552,7 @@ static int __create_shmem(struct drm_i915_private *i915,
static int shmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 0bf8f61134af..47b5e0e342ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -12,9 +12,12 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
@@ -401,7 +404,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
return 0;
}
- if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
+ if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"DMAR active");
@@ -492,7 +495,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
/* Exclude the reserved region from driver use */
mem->region.end = reserved_base - 1;
- mem->io_size = resource_size(&mem->region);
+ mem->io_size = min(mem->io_size, resource_size(&mem->region));
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
@@ -679,6 +682,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
@@ -693,12 +697,32 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
if (size == 0)
return -EINVAL;
+ /*
+ * With discrete devices that lack a mappable aperture, there is no
+ * way to ever access this memory from the CPU side.
+ */
+ if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
+ !(flags & I915_BO_ALLOC_GPU_ONLY))
+ return -ENOSPC;
+
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return -ENOMEM;
- ret = i915_gem_stolen_insert_node(i915, stolen, size,
- mem->min_page_size);
+ if (offset != I915_BO_INVALID_OFFSET) {
+ drm_dbg(&i915->drm,
+ "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
+ &offset, &size);
+
+ stolen->start = offset;
+ stolen->size = size;
+ mutex_lock(&i915->mm.stolen_lock);
+ ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
+ mutex_unlock(&i915->mm.stolen_lock);
+ } else {
+ ret = i915_gem_stolen_insert_node(i915, stolen, size,
+ mem->min_page_size);
+ }
if (ret)
goto err_free;
@@ -750,11 +774,6 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
if (GEM_WARN_ON(resource_size(&mem->region) == 0))
return -ENODEV;
- if (!io_mapping_init_wc(&mem->iomap,
- mem->io_start,
- mem->io_size))
- return -EIO;
-
/*
* TODO: For stolen lmem we mostly just care about populating the dsm
* related bits and setting up the drm_mm allocator for the range.
@@ -762,18 +781,26 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
*/
err = i915_gem_init_stolen(mem);
if (err)
- goto err_fini;
+ return err;
+
+ if (mem->io_size && !io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ mem->io_size)) {
+ err = -EIO;
+ goto err_cleanup;
+ }
return 0;
-err_fini:
- io_mapping_fini(&mem->iomap);
+err_cleanup:
+ i915_gem_cleanup_stolen(mem->i915);
return err;
}
static int release_stolen_lmem(struct intel_memory_region *mem)
{
- io_mapping_fini(&mem->iomap);
+ if (mem->io_size)
+ io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915);
return 0;
}
@@ -790,25 +817,43 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
{
struct intel_uncore *uncore = &i915->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ resource_size_t dsm_size, dsm_base, lmem_size;
struct intel_memory_region *mem;
+ resource_size_t io_start, io_size;
resource_size_t min_page_size;
- resource_size_t io_start;
- resource_size_t lmem_size;
- u64 lmem_base;
- lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
- if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2)))
+ if (WARN_ON_ONCE(instance))
return ERR_PTR(-ENODEV);
- lmem_size = pci_resource_len(pdev, 2) - lmem_base;
- io_start = pci_resource_start(pdev, 2) + lmem_base;
+ /* Use DSM base address instead for stolen memory */
+ dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
+ if (IS_DG1(uncore->i915)) {
+ lmem_size = pci_resource_len(pdev, 2);
+ if (WARN_ON(lmem_size < dsm_base))
+ return ERR_PTR(-ENODEV);
+ } else {
+ resource_size_t lmem_range;
+
+ lmem_range = intel_gt_read_register(&i915->gt0, XEHPSDV_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_size = lmem_range >> XEHPSDV_TILE_LMEM_RANGE_SHIFT;
+ lmem_size *= SZ_1G;
+ }
+
+ dsm_size = lmem_size - dsm_base;
+ if (pci_resource_len(pdev, 2) < lmem_size) {
+ io_start = 0;
+ io_size = 0;
+ } else {
+ io_start = pci_resource_start(pdev, 2) + dsm_base;
+ io_size = dsm_size;
+ }
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
I915_GTT_PAGE_SIZE_4K;
- mem = intel_memory_region_create(i915, lmem_base, lmem_size,
+ mem = intel_memory_region_create(i915, dsm_base, dsm_size,
min_page_size,
- io_start, lmem_size,
+ io_start, io_size,
type, instance,
&i915_region_stolen_lmem_ops);
if (IS_ERR(mem))
@@ -822,6 +867,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
&mem->io_start);
+ drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &dsm_base);
intel_memory_region_set_name(mem, "stolen-local");
@@ -850,63 +896,6 @@ i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
return mem;
}
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
- resource_size_t stolen_offset,
- resource_size_t size)
-{
- struct intel_memory_region *mem = i915->mm.stolen_region;
- struct drm_i915_gem_object *obj;
- struct drm_mm_node *stolen;
- int ret;
-
- if (!drm_mm_initialized(&i915->mm.stolen))
- return ERR_PTR(-ENODEV);
-
- drm_dbg(&i915->drm,
- "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
- &stolen_offset, &size);
-
- /* KISS and expect everything to be page-aligned */
- if (GEM_WARN_ON(size == 0) ||
- GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
- GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size)))
- return ERR_PTR(-EINVAL);
-
- stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
- if (!stolen)
- return ERR_PTR(-ENOMEM);
-
- stolen->start = stolen_offset;
- stolen->size = size;
- mutex_lock(&i915->mm.stolen_lock);
- ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
- mutex_unlock(&i915->mm.stolen_lock);
- if (ret)
- goto err_free;
-
- obj = i915_gem_object_alloc();
- if (!obj) {
- ret = -ENOMEM;
- goto err_stolen;
- }
-
- ret = __i915_gem_object_create_stolen(mem, obj, stolen);
- if (ret)
- goto err_object_free;
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- return obj;
-
-err_object_free:
- i915_gem_object_free(obj);
-err_stolen:
- i915_gem_stolen_remove_node(i915, stolen);
-err_free:
- kfree(stolen);
- return ERR_PTR(ret);
-}
-
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
return obj->ops == &i915_gem_object_stolen_ops;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index ccdf7befc571..d5005a39d130 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -31,10 +31,6 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- resource_size_t stolen_offset,
- resource_size_t size);
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index d6adda5bf96b..80ac0db1ae8c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -219,6 +219,14 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
return ret;
}
+bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ i915_gem_object_is_tiled(obj);
+}
+
int
i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.h b/drivers/gpu/drm/i915/gem/i915_gem_tiling.h
index 9924196a8139..6bd5751abf28 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.h
@@ -8,8 +8,10 @@
#include <linux/types.h>
+struct drm_i915_gem_object;
struct drm_i915_private;
+bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
u32 i915_gem_fence_size(struct drm_i915_private *i915, u32 size,
unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 45cc5837ce00..4c25d9b2f138 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -20,6 +20,7 @@
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
+#include "gt/intel_gpu_commands.h"
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
@@ -126,14 +127,22 @@ i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
struct ttm_place *place,
+ resource_size_t offset,
+ resource_size_t size,
unsigned int flags)
{
memset(place, 0, sizeof(*place));
place->mem_type = intel_region_to_ttm_type(mr);
+ if (mr->type == INTEL_MEMORY_SYSTEM)
+ return;
+
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place->flags |= TTM_PL_FLAG_CONTIGUOUS;
- if (mr->io_size && mr->io_size < mr->total) {
+ if (offset != I915_BO_INVALID_OFFSET) {
+ place->fpfn = offset >> PAGE_SHIFT;
+ place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
+ } else if (mr->io_size && mr->io_size < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
@@ -155,12 +164,14 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
placement->num_placement = 1;
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested, flags);
+ obj->mm.region, requested, obj->bo_offset,
+ obj->base.size, flags);
/* Cache this on object? */
placement->num_busy_placement = num_allowed;
for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
+ i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
+ obj->bo_offset, obj->base.size, flags);
if (num_allowed == 0) {
*busy = *requested;
@@ -255,12 +266,33 @@ static const struct i915_refct_sgt_ops tt_rsgt_ops = {
.release = i915_ttm_tt_release
};
+static inline bool
+i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
+{
+ bool lmem_placement = false;
+ int i;
+
+ for (i = 0; i < obj->mm.n_placements; i++) {
+ /* Compression is not allowed for objects with an smem placement */
+ if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
+ return false;
+ if (!lmem_placement &&
+ obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
+ lmem_placement = true;
+ }
+
+ return lmem_placement;
+}
+
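Note: with flat CCS, every 256 bytes of main surface is covered by one byte of compression control state (NUM_BYTES_PER_CCS_BYTE above is 256 on these parts), and the TT is extended by that many extra pages: a 64 MiB object needs 256 KiB of CCS, i.e. 64 extra 4 KiB pages. A worked sketch of the computation:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    #define EX_BYTES_PER_CCS_BYTE 256 /* mirrors NUM_BYTES_PER_CCS_BYTE */

    /* Extra pages of CCS state needed to back a flat-CCS object. */
    static unsigned long example_ccs_pages(size_t size)
    {
            return DIV_ROUND_UP(DIV_ROUND_UP(size, EX_BYTES_PER_CCS_BYTE),
                                PAGE_SIZE);
    }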
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
+ struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
+ bdev);
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ unsigned long ccs_pages = 0;
enum ttm_caching caching;
struct i915_ttm_tt *i915_tt;
int ret;
@@ -283,7 +315,12 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
- ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
+ if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
+ ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
+ NUM_BYTES_PER_CCS_BYTE),
+ PAGE_SIZE);
+
+ ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages);
if (ret)
goto err_free;
@@ -763,6 +800,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
i915_sg_dma_sizes(rsgt->table.sgl));
}
+ GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
i915_ttm_adjust_lru(obj);
return ret;
}
@@ -802,7 +840,8 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
struct ttm_placement placement;
int ret;
- i915_ttm_place_from_region(mr, &requested, flags);
+ i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
+ obj->base.size, flags);
placement.num_placement = 1;
placement.num_busy_placement = 1;
placement.placement = &requested;
@@ -936,7 +975,7 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
- ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+ ttm_bo_move_to_lru_tail(bo);
spin_unlock(&bo->bdev->lru_lock);
}
@@ -1142,6 +1181,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
*/
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
@@ -1158,6 +1198,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+ obj->bo_offset = offset;
+
/* Don't put on a region list until we're either locked or fully initialized. */
obj->mm.region = mem;
INIT_LIST_HEAD(&obj->mm.region_link);
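Note: a fixed BO offset is turned into a TTM placement by pinning the first and one-past-last page frame numbers, as i915_ttm_place_from_region() does above. A short sketch of the conversion, assuming a zeroed struct ttm_place:

    #include <drm/ttm/ttm_placement.h>
    #include <linux/mm.h>

    /* Constrain a placement to the pages covering [offset, offset + size). */
    static void example_place_at(struct ttm_place *place,
                                 resource_size_t offset,
                                 resource_size_t size)
    {
            place->fpfn = offset >> PAGE_SHIFT;
            place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
    }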
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 9d698ad00853..73e371aa3850 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -45,6 +45,7 @@ i915_ttm_to_gem(struct ttm_buffer_object *bo)
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 1ebe6e4086a1..a10716f4e717 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -467,19 +467,6 @@ out:
return fence;
}
-static int
-prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- struct i915_deps *deps)
-{
- int ret;
-
- ret = i915_deps_add_dependency(deps, bo->moving, ctx);
- if (!ret)
- ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
-
- return ret;
-}
-
/**
* i915_ttm_move - The TTM move callback used by i915.
* @bo: The buffer object.
@@ -534,7 +521,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct i915_deps deps;
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
- ret = prev_deps(bo, ctx, &deps);
+ ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
if (ret) {
i915_refct_sgt_put(dst_rsgt);
return ret;
@@ -611,7 +598,11 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(src);
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
- ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
+ ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
+ if (ret)
+ return ret;
+
+ ret = dma_resv_reserve_fences(dst_bo->base.resv, 1);
if (ret)
return ret;
@@ -633,9 +624,8 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
if (IS_ERR_OR_NULL(copy_fence))
return PTR_ERR_OR_ZERO(copy_fence);
- dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
- dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
-
+ dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ);
dma_fence_put(copy_fence);
return 0;
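
The hunks above complete i915_gem_ttm_move.c's switch to the explicit dma_resv usage API from earlier in this merge: fence slots must be reserved up front with dma_resv_reserve_fences(), and dma_resv_add_fence() takes a usage class in place of the old exclusive/shared split. A minimal sketch of the pattern (illustrative only, not part of the patch; assumes the reservation object is already locked):

	static int attach_fence_sketch(struct dma_resv *resv, struct dma_fence *fence)
	{
		int ret;

		dma_resv_assert_held(resv);

		/* Reserve a slot first; this may allocate and therefore fail. */
		ret = dma_resv_reserve_fences(resv, 1);
		if (ret)
			return ret;

		/* The usage class replaces the exclusive/shared distinction. */
		dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
		return 0;
	}
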
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 6d1a71d6404c..094f06b4ce33 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -86,7 +86,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
return true;
/* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout(obj->base.resv, true, false,
+ r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
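
For reference, dma_resv_wait_timeout() now takes a usage class instead of a wait-for-all bool; DMA_RESV_USAGE_BOOKKEEP is the widest class, so the wait above still covers every fence on the object. A one-line sketch of the reworked call:

	long t = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				       false /* non-interruptible */,
				       MAX_SCHEDULE_TIMEOUT);
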
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index dab3d30c09a0..319936f91ac5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -40,7 +40,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
struct dma_fence *fence;
long ret = timeout ?: 1;
- dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+ dma_resv_iter_begin(&cursor, resv,
+ dma_resv_usage_rw(flags & I915_WAIT_ALL));
dma_resv_for_each_fence_unlocked(&cursor, fence) {
ret = i915_gem_object_wait_fence(fence, flags, timeout);
if (ret <= 0)
@@ -117,7 +118,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL);
+ dma_resv_iter_begin(&cursor, obj->base.resv,
+ dma_resv_usage_rw(flags & I915_WAIT_ALL));
dma_resv_for_each_fence_unlocked(&cursor, fence)
i915_gem_fence_wait_priority(fence, attr);
dma_resv_iter_end(&cursor);
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 7271fbf813fa..ee87874e59dc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
#include "i915_gemfs.h"
+#include "i915_utils.h"
int i915_gemfs_init(struct drm_i915_private *i915)
{
@@ -32,7 +33,7 @@ int i915_gemfs_init(struct drm_i915_private *i915)
*/
opts = NULL;
- if (intel_vtd_active(i915)) {
+ if (i915_vtd_active(i915)) {
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
opts = huge_opt;
drm_info(&i915->drm,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 7a84fa68a99c..ef15967be51a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -5,6 +5,8 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/string_helpers.h>
+#include <linux/swap.h>
#include "i915_selftest.h"
@@ -804,7 +806,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
if (vma->resource->page_sizes_gtt != expected_gtt) {
pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
vma->resource->page_sizes_gtt, expected_gtt,
- obj->base.size, yesno(!!single));
+ obj->base.size, str_yes_no(!!single));
err = -EINVAL;
break;
}
@@ -960,7 +962,7 @@ static int igt_mock_ppgtt_64K(void *arg)
if (vma->resource->page_sizes_gtt != expected_gtt) {
pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
vma->resource->page_sizes_gtt,
- expected_gtt, i, yesno(!!single));
+ expected_gtt, i, str_yes_no(!!single));
err = -EINVAL;
goto out_vma_unpin;
}
@@ -1706,14 +1708,14 @@ static int igt_shrink_thp(void *arg)
I915_SHRINK_WRITEBACK);
if (should_swap == i915_gem_object_has_pages(obj)) {
pr_err("unexpected pages mismatch, should_swap=%s\n",
- yesno(should_swap));
+ str_yes_no(should_swap));
err = -EINVAL;
goto out_put;
}
if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
pr_err("unexpected residual page-size bits, should_swap=%s\n",
- yesno(should_swap));
+ str_yes_no(should_swap));
err = -EINVAL;
goto out_put;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7609db87df05..93a67422ca3b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -5,6 +5,7 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/string_helpers.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
@@ -700,7 +701,7 @@ static int igt_ctx_exec(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name,
- yesno(i915_gem_context_has_full_ppgtt(ctx)),
+ str_yes_no(i915_gem_context_has_full_ppgtt(ctx)),
err);
intel_context_put(ce);
kernel_context_close(ctx);
@@ -834,7 +835,7 @@ static int igt_shared_ctx_exec(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name,
- yesno(i915_gem_context_has_full_ppgtt(ctx)),
+ str_yes_no(i915_gem_context_has_full_ppgtt(ctx)),
err);
intel_context_put(ce);
kernel_context_close(ctx);
@@ -1415,7 +1416,7 @@ static int igt_ctx_readonly(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
ce->engine->name,
- yesno(i915_gem_context_has_full_ppgtt(ctx)),
+ str_yes_no(i915_gem_context_has_full_ppgtt(ctx)),
err);
i915_gem_context_unlock_engines(ctx);
goto out_file;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index b071a58dd6da..62c61af77a42 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -88,7 +88,7 @@ out:
static int igt_dmabuf_import_same_driver_lmem(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM];
+ struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0];
struct drm_i915_gem_object *obj;
struct drm_gem_object *import;
struct dma_buf *dmabuf;
@@ -219,7 +219,8 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
goto out_detach;
}
- timeout = dma_resv_wait_timeout(dmabuf->resv, false, true, 5 * HZ);
+ timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
+ true, 5 * HZ);
if (!timeout) {
pr_err("dmabuf wait for exclusive fence timed out.\n");
timeout = -ETIME;
@@ -252,10 +253,10 @@ static int igt_dmabuf_import_same_driver_lmem_smem(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_memory_region *regions[2];
- if (!i915->mm.regions[INTEL_REGION_LMEM])
+ if (!i915->mm.regions[INTEL_REGION_LMEM_0])
return 0;
- regions[0] = i915->mm.regions[INTEL_REGION_LMEM];
+ regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0];
regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
return igt_dmabuf_import_same_driver(i915, regions, 2);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index d534141b2cf7..801af51aff62 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -47,14 +47,16 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
{
struct drm_i915_private *i915 = gt->i915;
struct intel_memory_region *src_mr = i915->mm.regions[src];
+ struct intel_memory_region *dst_mr = i915->mm.regions[dst];
struct drm_i915_gem_object *obj;
struct i915_gem_ww_ctx ww;
int err = 0;
GEM_BUG_ON(!src_mr);
+ GEM_BUG_ON(!dst_mr);
/* Switch object backing-store on create */
- obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
+ obj = i915_gem_object_create_region(src_mr, dst_mr->min_page_size, 0, 0);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -92,17 +94,17 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
static int igt_smem_create_migrate(void *arg)
{
- return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_SMEM);
+ return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_SMEM);
}
static int igt_lmem_create_migrate(void *arg)
{
- return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM);
+ return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM_0);
}
static int igt_same_create_migrate(void *arg)
{
- return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_LMEM);
+ return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_LMEM_0);
}
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
@@ -152,7 +154,7 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
}
} else {
- err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
+ err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0);
if (err) {
pr_err("Object failed migration to lmem\n");
if (err)
@@ -216,8 +218,10 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
i915_gem_object_is_lmem(obj),
0xdeadbeaf, &rq);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
- i915_gem_object_set_moving_fence(obj, &rq->fence);
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
+ if (!err)
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
}
if (err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index a132e241c3ee..5bc93a1ce3e3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -4,6 +4,7 @@
* Copyright © 2016 Intel Corporation
*/
+#include <linux/highmem.h>
#include <linux/prime_numbers.h>
#include "gem/i915_gem_internal.h"
@@ -1220,8 +1221,8 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
- i915_gem_object_set_moving_fence(obj, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
}
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 6d6082b5f31f..8ac6726ec16b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -42,8 +42,7 @@ mock_context(struct drm_i915_private *i915,
if (!ppgtt)
goto err_free;
- ctx->vm = i915_vm_open(&ppgtt->vm);
- i915_vm_put(&ppgtt->vm);
+ ctx->vm = &ppgtt->vm;
}
mutex_init(&ctx->engines_mutex);
@@ -59,7 +58,7 @@ mock_context(struct drm_i915_private *i915,
err_vm:
if (ctx->vm)
- i915_vm_close(ctx->vm);
+ i915_vm_put(ctx->vm);
err_free:
kfree(ctx);
return NULL;
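
With the separate VM open-count removed (the gen6_ppgtt.c hunk below now asserts on kref_read() instead), the mock context simply inherits the creation reference on ppgtt->vm and drops it with i915_vm_put() on the error path. A sketch of the equivalent pattern for a second owner, using the existing i915 kref helpers (function names here are illustrative):

	static struct i915_address_space *
	vm_for_second_owner(struct i915_ppgtt *ppgtt)
	{
		/* Plain reference counting: each owner takes its own ref... */
		return i915_vm_get(&ppgtt->vm);
	}

	static void second_owner_release(struct i915_address_space *vm)
	{
		/* ...and drops it with a put; no open/close pairing remains. */
		i915_vm_put(vm);
	}
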
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 871fe7bda0e0..1bb766c79dcb 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -322,7 +322,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
- GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
+ GEM_BUG_ON(!kref_read(&ppgtt->base.vm.ref));
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index b1b9c3fd7bf9..3e13960615bd 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -5,8 +5,8 @@
#include "gen8_engine_cs.h"
#include "i915_drv.h"
+#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
-#include "intel_gt_regs.h"
#include "intel_lrc.h"
#include "intel_ring.h"
@@ -165,33 +165,9 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
-static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg)
{
- static const i915_reg_t vd[] = {
- GEN12_VD0_AUX_NV,
- GEN12_VD1_AUX_NV,
- GEN12_VD2_AUX_NV,
- GEN12_VD3_AUX_NV,
- };
-
- static const i915_reg_t ve[] = {
- GEN12_VE0_AUX_NV,
- GEN12_VE1_AUX_NV,
- };
-
- if (engine->class == VIDEO_DECODE_CLASS)
- return vd[engine->instance];
-
- if (engine->class == VIDEO_ENHANCEMENT_CLASS)
- return ve[engine->instance];
-
- GEM_BUG_ON("unknown aux_inv reg\n");
- return INVALID_MMIO_REG;
-}
-
-static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
-{
- *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
*cs++ = i915_mmio_reg_offset(inv_reg);
*cs++ = AUX_INV;
*cs++ = MI_NOOP;
@@ -236,7 +212,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (mode & EMIT_INVALIDATE) {
u32 flags = 0;
- u32 *cs;
+ u32 *cs, count;
flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TLB_INVALIDATE;
@@ -254,7 +230,12 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_FLAGS;
- cs = intel_ring_begin(rq, 8 + 4);
+ if (!HAS_FLAT_CCS(rq->engine->i915))
+ count = 8 + 4;
+ else
+ count = 8;
+
+ cs = intel_ring_begin(rq, count);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -267,8 +248,10 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- /* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+ if (!HAS_FLAT_CCS(rq->engine->i915)) {
+ /* hsdes: 1809175790 */
+ cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ }
*cs++ = preparser_disable(false);
intel_ring_advance(rq, cs);
@@ -283,12 +266,17 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
u32 cmd, *cs;
cmd = 4;
- if (mode & EMIT_INVALIDATE)
+ if (mode & EMIT_INVALIDATE) {
cmd += 2;
- if (mode & EMIT_INVALIDATE)
- aux_inv = rq->engine->mask & ~BIT(BCS0);
- if (aux_inv)
- cmd += 2 * hweight32(aux_inv) + 2;
+
+ if (!HAS_FLAT_CCS(rq->engine->i915) &&
+ (rq->engine->class == VIDEO_DECODE_CLASS ||
+ rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
+ aux_inv = rq->engine->mask & ~BIT(BCS0);
+ if (aux_inv)
+ cmd += 4;
+ }
+ }
cs = intel_ring_begin(rq, cmd);
if (IS_ERR(cs))
@@ -319,15 +307,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
*cs++ = 0; /* value */
if (aux_inv) { /* hsdes: 1809175790 */
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- *cs++ = MI_LOAD_REGISTER_IMM(hweight32(aux_inv));
- for_each_engine_masked(engine, rq->engine->gt, aux_inv, tmp) {
- *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
- *cs++ = AUX_INV;
- }
- *cs++ = MI_NOOP;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ else
+ cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
}
if (mode & EMIT_INVALIDATE)
@@ -403,6 +386,59 @@ int gen8_emit_init_breadcrumb(struct i915_request *rq)
return 0;
}
+static int __gen125_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags,
+ u32 arb)
+{
+ struct intel_context *ce = rq->context;
+ u32 wa_offset = lrc_indirect_bb(ce);
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | arb;
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0));
+ *cs++ = wa_offset + DG2_PREDICATE_RESULT_WA;
+ *cs++ = 0;
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ /* Fixup a stray MI_SET_PREDICATE, as it prevents us from executing the ring */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8;
+ *cs++ = wa_offset + DG2_PREDICATE_RESULT_BB;
+ *cs++ = 0;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen125_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ return __gen125_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
+}
+
+int gen125_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ return __gen125_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
+}
+
int gen8_emit_bb_start_noarb(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
@@ -601,6 +637,43 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
return cs;
}
+/* Wa_14014475959:dg2 */
+#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540
+static u32 ccs_semaphore_offset(struct i915_request *rq)
+{
+ return i915_ggtt_offset(rq->context->state) +
+ (LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
+}
+
+/* Wa_14014475959:dg2 */
+static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
+{
+ int i;
+
+ *cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
+ MI_ATOMIC_MOVE;
+ *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /*
+ * When MI_ATOMIC_INLINE_DATA is set, this command must be 11 DW + (1 NOP)
+ * to align: the 4 DWs above plus 8 filler DWs here.
+ */
+ for (i = 0; i < 8; ++i)
+ *cs++ = 0;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = 0;
+
+ return cs;
+}
+
static __always_inline u32*
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
@@ -611,6 +684,10 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
!intel_uc_uses_guc_submission(&rq->engine->gt->uc))
cs = gen12_emit_preempt_busywait(rq, cs);
+ /* Wa_14014475959:dg2 */
+ if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
+ cs = ccs_emit_wa_busywait(rq, cs);
+
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
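
gen12_emit_aux_table_inv() is now exported with the ring pointer first and the invalidation register as a parameter, and callers budget its four dwords themselves. A sketch of a caller, following the same shape as the RCS flush hunk above (illustrative only):

	if (!HAS_FLAT_CCS(rq->engine->i915)) {
		u32 *cs;

		cs = intel_ring_begin(rq, 4); /* LRI(1) + reg + AUX_INV + NOOP */
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* hsdes: 1809175790 */
		cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
		intel_ring_advance(rq, cs);
	}
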
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
index cc6e21d3662a..32e3d2b831bb 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -10,7 +10,7 @@
#include <linux/types.h>
#include "i915_gem.h" /* GEM_BUG_ON */
-
+#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
struct i915_request;
@@ -31,6 +31,13 @@ int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags);
+int gen125_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+int gen125_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+
u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
@@ -38,6 +45,8 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg);
+
static inline u32 *
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index f574da00eff1..c7bd5d71b03e 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -454,11 +454,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
pd = pdp->entry[gen8_pd_index(idx, 2)];
}
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
return idx;
}
@@ -631,7 +631,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
}
} while (rem >= page_size && index < I915_PDES);
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
/*
* Is it safe to mark the 2M block as 64K? -- Either we have
@@ -647,7 +647,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
@@ -668,7 +668,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
}
@@ -722,7 +722,7 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
vaddr = px_vaddr(pt);
vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
- clflush_cache_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
+ drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}
static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
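
drm_clflush_virt_range() replaces the open-coded clflush_cache_range() calls above; it flushes the CPU cachelines covering an arbitrary virtual range and carries the cross-architecture fallbacks that the x86-only helper lacked. A sketch of the idiom for a single PTE update (illustrative):

	#include <drm/drm_cache.h>

	static void write_pte_sketch(gen8_pte_t *vaddr, unsigned int idx,
				     gen8_pte_t pte)
	{
		vaddr[idx] = pte;
		/* Flush only the cacheline(s) backing this entry. */
		drm_clflush_virt_range(&vaddr[idx], sizeof(*vaddr));
	}
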
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 209cf265bf74..9dc9dccf7b09 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -4,6 +4,7 @@
*/
#include <linux/kthread.h>
+#include <linux/string_helpers.h>
#include <trace/events/dma_fence.h>
#include <uapi/linux/sched/types.h>
@@ -512,7 +513,7 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
if (!b)
return;
- drm_printf(p, "IRQ: %s\n", enableddisabled(b->irq_armed));
+ drm_printf(p, "IRQ: %s\n", str_enabled_disabled(b->irq_armed));
if (!list_empty(&b->signalers))
print_signals(b, p);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 5d0ec7c49b6a..4070cb5711d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -386,7 +386,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
ce->ring = NULL;
ce->ring_size = SZ_4K;
- ewma_runtime_init(&ce->runtime.avg);
+ ewma_runtime_init(&ce->stats.runtime.avg);
ce->vm = i915_vm_get(engine->gt->vm);
@@ -400,7 +400,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
INIT_LIST_HEAD(&ce->guc_state.fences);
INIT_LIST_HEAD(&ce->guc_state.requests);
- ce->guc_id.id = GUC_INVALID_LRC_ID;
+ ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
INIT_LIST_HEAD(&ce->guc_id.link);
INIT_LIST_HEAD(&ce->destroyed_link);
@@ -576,6 +576,31 @@ void intel_context_bind_parent_child(struct intel_context *parent,
child->parallel.parent = parent;
}
+u64 intel_context_get_total_runtime_ns(const struct intel_context *ce)
+{
+ u64 total, active;
+
+ total = ce->stats.runtime.total;
+ if (ce->ops->flags & COPS_RUNTIME_CYCLES)
+ total *= ce->engine->gt->clock_period_ns;
+
+ active = READ_ONCE(ce->stats.active);
+ if (active)
+ active = intel_context_clock() - active;
+
+ return total + active;
+}
+
+u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+{
+ u64 avg = ewma_runtime_read(&ce->stats.runtime.avg);
+
+ if (ce->ops->flags & COPS_RUNTIME_CYCLES)
+ avg *= ce->engine->gt->clock_period_ns;
+
+ return avg;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
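
These two getters feed the per-client busyness (fdinfo) work elsewhere in this merge: contexts whose hardware runtime is recorded in CS timestamp cycles set COPS_RUNTIME_CYCLES and are scaled by the GT clock period, and a context still on the GPU contributes the time elapsed since stats.active was stamped. A hedged usage sketch (the seq_file and context are assumed to come from the caller):

	static void show_client_runtime(struct seq_file *m,
					struct intel_context *ce)
	{
		/* The total includes an estimate for a still-running context. */
		seq_printf(m, "total: %llu ns, avg: %llu ns\n",
			   intel_context_get_total_runtime_ns(ce),
			   intel_context_get_avg_runtime_ns(ce));
	}
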
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index d8c74bbf9aae..b7d3214d2cdd 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -351,18 +351,13 @@ intel_context_clear_nopreempt(struct intel_context *ce)
clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
-static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
-{
- const u32 period = ce->engine->gt->clock_period_ns;
-
- return READ_ONCE(ce->runtime.total) * period;
-}
+u64 intel_context_get_total_runtime_ns(const struct intel_context *ce);
+u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);
-static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+static inline u64 intel_context_clock(void)
{
- const u32 period = ce->engine->gt->clock_period_ns;
-
- return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
+ /* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */
+ return ktime_get_raw_fast_ns();
}
#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 30cd81ad8911..09f82545789f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -35,6 +35,9 @@ struct intel_context_ops {
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
+#define COPS_RUNTIME_CYCLES_BIT 1
+#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)
+
int (*alloc)(struct intel_context *ce);
void (*ban)(struct intel_context *ce, struct i915_request *rq);
@@ -134,14 +137,19 @@ struct intel_context {
} lrc;
u32 tag; /* cookie passed to HW to track this context on submission */
- /* Time on GPU as tracked by the hw. */
- struct {
- struct ewma_runtime avg;
- u64 total;
- u32 last;
- I915_SELFTEST_DECLARE(u32 num_underflow);
- I915_SELFTEST_DECLARE(u32 max_underflow);
- } runtime;
+ /** stats: Context GPU engine busyness tracking. */
+ struct intel_context_stats {
+ u64 active;
+
+ /* Time on GPU as tracked by the hw. */
+ struct {
+ struct ewma_runtime avg;
+ u64 total;
+ u32 last;
+ I915_SELFTEST_DECLARE(u32 num_underflow);
+ I915_SELFTEST_DECLARE(u32 max_underflow);
+ } runtime;
+ } stats;
unsigned int active_count; /* protected by timeline->mutex */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 1c0ab05c3c40..1431f1e9dbee 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -4,6 +4,7 @@
#include <asm/cacheflush.h>
#include <drm/drm_util.h>
+#include <drm/drm_cache.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
@@ -143,15 +144,9 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
* of extra paranoia to try and ensure that the HWS takes the value
* we give and that it doesn't end up trapped inside the CPU!
*/
- if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
- mb();
- clflush(&engine->status_page.addr[reg]);
- engine->status_page.addr[reg] = value;
- clflush(&engine->status_page.addr[reg]);
- mb();
- } else {
- WRITE_ONCE(engine->status_page.addr[reg], value);
- }
+ drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
+ WRITE_ONCE(engine->status_page.addr[reg], value);
+ drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
}
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index e1aa78b20d2d..14c6ddbbfde8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -3,6 +3,8 @@
* Copyright © 2016 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include <drm/drm_print.h>
#include "gem/i915_gem_context.h"
@@ -434,6 +436,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
engine->props.preempt_timeout_ms = 0;
+ if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
+ __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
+ engine->class == RENDER_CLASS)
+ engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
+
/* features common between engines sharing EUs */
if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
@@ -724,12 +731,24 @@ static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{
- int i;
- u8 map[MAX_ENGINE_INSTANCE + 1];
+ /*
+ * A logical-to-physical mapping is needed for proper support
+ * of the split-frame feature.
+ */
+ if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) {
+ const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };
+
+ populate_logical_ids(gt, logical_ids, class,
+ map, ARRAY_SIZE(map));
+ } else {
+ int i;
+ u8 map[MAX_ENGINE_INSTANCE + 1];
- for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
- map[i] = i;
- populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map));
+ for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
+ map[i] = i;
+ populate_logical_ids(gt, logical_ids, class,
+ map, ARRAY_SIZE(map));
+ }
}
/**
@@ -1261,6 +1280,15 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
int err;
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+ /*
+ * Wa_22011802037: gen12: prior to doing a reset, ensure the CS is
+ * stopped; set the ring stop bit and the prefetch disable bit to halt the CS
+ */
+ if (GRAPHICS_VER(engine->i915) == 12)
+ intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
+ _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
+
err = __intel_wait_for_register_fw(engine->uncore, mode,
MODE_IDLE, MODE_IDLE,
fast_timeout_us,
@@ -1695,9 +1723,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (intel_engine_uses_guc(engine)) {
- /* nothing to print yet */
- } else if (HAS_EXECLISTS(dev_priv)) {
+ if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1706,9 +1732,8 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
u8 read, write;
drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
- yesno(test_bit(TASKLET_STATE_SCHED,
- &engine->sched_engine->tasklet.state)),
- enableddisabled(!atomic_read(&engine->sched_engine->tasklet.count)),
+ str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)),
+ str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)),
repr_timer(&engine->execlists.preempt),
repr_timer(&engine->execlists.timer));
@@ -1969,7 +1994,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
drm_printf(m, "\tBarriers?: %s\n",
- yesno(!llist_empty(&engine->barrier_tasks)));
+ str_yes_no(!llist_empty(&engine->barrier_tasks)));
drm_printf(m, "\tLatency: %luus\n",
ewma__engine_latency_read(&engine->latency));
if (intel_engine_supports_stats(engine))
@@ -2011,7 +2036,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.addr, PAGE_SIZE);
- drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
+ drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine)));
intel_engine_print_breadcrumbs(engine, m);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index 0bf8b45c9319..75a0c55c5aa5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -148,6 +148,7 @@
(REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \
REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1))
+#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8) /* gen12+ */
#define MI_PREDICATE_RESULT_2(base) _MMIO((base) + 0x3bc)
#define LOWER_SLICE_ENABLED (1 << 0)
#define LOWER_SLICE_DISABLED (0 << 0)
@@ -181,6 +182,7 @@
#define GFX_SURFACE_FAULT_ENABLE (1 << 12)
#define GFX_REPLAY_MODE (1 << 11)
#define GFX_PSMI_GRANULARITY (1 << 10)
+#define GEN12_GFX_PREFETCH_DISABLE REG_BIT(10)
#define GFX_PPGTT_ENABLE (1 << 9)
#define GEN8_GFX_PPGTT_48B (1 << 7)
#define GFX_FORWARD_VBLANK_MASK (3 << 5)
@@ -192,6 +194,7 @@
#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4)
#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
+#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8)
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 19ff8758e34d..298f2cc7a879 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -96,7 +96,9 @@ struct i915_ctx_workarounds {
#define I915_MAX_VCS 8
#define I915_MAX_VECS 4
+#define I915_MAX_SFC (I915_MAX_VCS / 2)
#define I915_MAX_CCS 4
+#define I915_MAX_RCS 1
/*
* Engine IDs definitions.
@@ -526,6 +528,8 @@ struct intel_engine_cs {
#define I915_ENGINE_WANT_FORCED_PREEMPTION BIT(8)
#define I915_ENGINE_HAS_RCS_REG_STATE BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY BIT(10)
+#define I915_ENGINE_FIRST_RENDER_COMPUTE BIT(11)
+#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT BIT(12)
unsigned int flags;
/*
@@ -626,6 +630,13 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}
+/* Wa_14014475959:dg2 */
+static inline bool
+intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+}
+
#define instdone_has_slice(dev_priv___, sseu___, slice___) \
((GRAPHICS_VER(dev_priv___) == 7 ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
@@ -643,7 +654,7 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
#define for_each_instdone_gslice_dss_xehp(dev_priv_, sseu_, iter_, gslice_, dss_) \
for ((iter_) = 0, (gslice_) = 0, (dss_) = 0; \
- (iter_) < GEN_MAX_SUBSLICES; \
+ (iter_) < GEN_SS_MASK_SIZE; \
(iter_)++, (gslice_) = (iter_) / GEN_DSS_PER_GSLICE, \
(dss_) = (iter_) % GEN_DSS_PER_GSLICE) \
for_each_if(intel_sseu_has_subslice((sseu_), 0, (iter_)))
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index b8c9b6b89003..46a174f8aa00 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -47,7 +47,7 @@ static const u8 uabi_classes[] = {
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
- /* TODO: Add COMPUTE_CLASS mapping once ABI is available */
+ [COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
};
static int engine_cmp(void *priv, const struct list_head *A,
@@ -193,7 +193,6 @@ static void add_legacy_ring(struct legacy_ring *ring,
void intel_engines_driver_register(struct drm_i915_private *i915)
{
struct legacy_ring ring = {};
- u8 uabi_instances[5] = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
LIST_HEAD(engines);
@@ -214,8 +213,10 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
- GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
- engine->uabi_instance = uabi_instances[engine->uabi_class]++;
+ GEM_BUG_ON(engine->uabi_class >=
+ ARRAY_SIZE(i915->engine_uabi_class_count));
+ engine->uabi_instance =
+ i915->engine_uabi_class_count[engine->uabi_class]++;
/* Replace the internal name with the final user facing name */
memcpy(old, engine->name, sizeof(engine->name));
@@ -245,8 +246,8 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
int class, inst;
int errors = 0;
- for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
- for (inst = 0; inst < uabi_instances[class]; inst++) {
+ for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
+ for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
engine = intel_engine_lookup_user(i915,
class, inst);
if (!engine) {
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 3e0c81f06bd0..86f7a9ac1c39 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -107,6 +107,7 @@
*
*/
#include <linux/interrupt.h>
+#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -624,8 +625,6 @@ static void __execlists_schedule_out(struct i915_request * const rq,
GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
__set_bit(ccid - 1, &engine->context_tag);
}
-
- lrc_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !--engine->fw_active)
@@ -1335,11 +1334,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
} else if (timeslice_expired(engine, last)) {
ENGINE_TRACE(engine,
"expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
- yesno(timer_expired(&execlists->timer)),
+ str_yes_no(timer_expired(&execlists->timer)),
last->fence.context, last->fence.seqno,
rq_prio(last),
sched_engine->queue_priority_hint,
- yesno(timeslice_yield(execlists, last)));
+ str_yes_no(timeslice_yield(execlists, last)));
/*
* Consume this timeslice; ensure we start a new one.
@@ -1427,7 +1426,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
__i915_request_is_complete(rq) ? "!" :
__i915_request_has_started(rq) ? "*" :
"",
- yesno(engine != ve->siblings[0]));
+ str_yes_no(engine != ve->siblings[0]));
WRITE_ONCE(ve->request, NULL);
WRITE_ONCE(ve->base.sched_engine->queue_priority_hint, INT_MIN);
@@ -1650,12 +1649,6 @@ cancel_port_requests(struct intel_engine_execlists * const execlists,
return inactive;
}
-static void invalidate_csb_entries(const u64 *first, const u64 *last)
-{
- clflush((void *)first);
- clflush((void *)last);
-}
-
/*
* Starting with Gen12, the status has a new format:
*
@@ -2003,15 +1996,30 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* the wash as hardware, working or not, will need to do the
* invalidation before.
*/
- invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+ drm_clflush_virt_range(&buf[0], num_entries * sizeof(buf[0]));
/*
* We assume that any event reflects a change in context flow
* and merits a fresh timeslice. We reinstall the timer after
* inspecting the queue to see if we need to resubmit.
*/
- if (*prev != *execlists->active) /* elide lite-restores */
+ if (*prev != *execlists->active) { /* elide lite-restores */
+ /*
+ * Note the inherent discrepancy between the HW runtime,
+ * recorded as part of the context switch, and the CPU
+ * adjustment for active contexts. We have to hope that
+ * the delay in processing the CS event is very small
+ * and consistent. It works to our advantage to have
+ * the CPU adjustment _undershoot_ (i.e. start later than)
+ * the CS timestamp so we never overreport the runtime
+ * and correct ourselves later when updating from HW.
+ */
+ if (*prev)
+ lrc_runtime_stop((*prev)->context);
+ if (*execlists->active)
+ lrc_runtime_start((*execlists->active)->context);
new_timeslice(execlists);
+ }
return inactive;
}
@@ -2235,11 +2243,11 @@ static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
if (!cap->error)
goto err_cap;
- cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp, CORE_DUMP_FLAG_NONE);
if (!cap->error->gt)
goto err_gpu;
- cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp, CORE_DUMP_FLAG_NONE);
if (!cap->error->gt->engine)
goto err_gt;
@@ -2643,7 +2651,7 @@ unwind:
}
static const struct intel_context_ops execlists_context_ops = {
- .flags = COPS_HAS_INFLIGHT,
+ .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
.alloc = execlists_context_alloc,
@@ -2787,8 +2795,9 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
/* Check that the GPU does indeed update the CSB entries! */
memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
+ drm_clflush_virt_range(execlists->csb_status,
+ execlists->csb_size *
+ sizeof(execlists->csb_status));
/* Once more for luck and our trusty paranoia */
ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
@@ -2832,7 +2841,7 @@ static void execlists_sanitize(struct intel_engine_cs *engine)
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
intel_engine_reset_pinned_contexts(engine);
}
@@ -2911,7 +2920,7 @@ static int execlists_resume(struct intel_engine_cs *engine)
enable_execlists(engine);
- if (engine->class == RENDER_CLASS)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
xehp_enable_ccs_engines(engine);
return 0;
@@ -2957,9 +2966,8 @@ reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- mb(); /* paranoia: read the CSB pointers from after the reset */
- clflush(execlists->csb_write);
- mb();
+ drm_clflush_virt_range(execlists->csb_write,
+ sizeof(execlists->csb_write[0]));
inactive = process_csb(engine, inactive); /* drain preemption events */
@@ -3425,10 +3433,17 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
}
}
- if (intel_engine_has_preemption(engine))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen8_emit_bb_start_noarb;
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen125_emit_bb_start;
+ else
+ engine->emit_bb_start = gen125_emit_bb_start_noarb;
+ } else {
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+ }
engine->busyness = execlists_engine_busyness;
}
@@ -3701,7 +3716,7 @@ virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
}
static const struct intel_context_ops virtual_context_ops = {
- .flags = COPS_HAS_INFLIGHT,
+ .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
.alloc = virtual_context_alloc,
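
lrc_runtime_start()/lrc_runtime_stop(), called from process_csb() above and added on the LRC side of this series, conceptually just stamp and clear ce->stats.active with the raw monotonic clock so a running context can be estimated between hardware runtime updates. A rough sketch of that idea, not the actual lrc implementation:

	static void runtime_start_sketch(struct intel_context *ce)
	{
		/* Stamp the CPU clock when the context lands on the HW... */
		WRITE_ONCE(ce->stats.active, intel_context_clock());
	}

	static void runtime_stop_sketch(struct intel_context *ce)
	{
		/* ...and clear it once the CS-recorded runtime takes over. */
		WRITE_ONCE(ce->stats.active, 0);
	}
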
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 8850d4e0f9cc..e6b2eb122ad7 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -3,21 +3,20 @@
* Copyright © 2020 Intel Corporation
*/
-#include <linux/agp_backend.h>
-#include <linux/stop_machine.h>
-
+#include <linux/types.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <drm/i915_drm.h>
-#include <drm/intel-gtt.h>
#include "gem/i915_gem_lmem.h"
#include "intel_gt.h"
+#include "intel_gt_gmch.h"
#include "intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_gtt.h"
@@ -94,28 +93,6 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
return 0;
}
-/*
- * Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static bool needs_idle_maps(struct drm_i915_private *i915)
-{
- /*
- * Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- if (!intel_vtd_active(i915))
- return false;
-
- if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
- return true;
-
- if (GRAPHICS_VER(i915) == 12)
- return true; /* XXX DMAR fault reason 7 */
-
- return false;
-}
-
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
@@ -126,7 +103,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
- int open;
+ int save_skip_rewrite;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
@@ -135,8 +112,12 @@ retry:
mutex_lock(&vm->mutex);
- /* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&vm->open, 0);
+ /*
+ * Skip rewriting PTE on VMA unbind.
+ * FIXME: Use an argument to i915_vma_unbind() instead?
+ */
+ save_skip_rewrite = vm->skip_pte_rewrite;
+ vm->skip_pte_rewrite = true;
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -154,16 +135,14 @@ retry:
*/
i915_gem_object_get(obj);
- atomic_set(&vm->open, open);
mutex_unlock(&vm->mutex);
i915_gem_object_lock(obj, NULL);
- open = i915_vma_unbind(vma);
+ GEM_WARN_ON(i915_vma_unbind(vma));
i915_gem_object_unlock(obj);
-
- GEM_WARN_ON(open);
-
i915_gem_object_put(obj);
+
+ vm->skip_pte_rewrite = save_skip_rewrite;
goto retry;
}
@@ -179,7 +158,7 @@ retry:
vm->clear_range(vm, 0, vm->total);
- atomic_set(&vm->open, open);
+ vm->skip_pte_rewrite = save_skip_rewrite;
mutex_unlock(&vm->mutex);
}
@@ -202,7 +181,7 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
spin_unlock_irq(&uncore->lock);
}
-static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -227,11 +206,6 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
-static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- intel_gtt_chipset_flush();
-}
-
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
@@ -244,258 +218,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void gen8_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
-
- ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *gte;
- gen8_pte_t __iomem *end;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- gte = (gen8_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
-
- for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- gen8_set_pte(gte++, pte_encode | addr);
- GEM_BUG_ON(gte > end);
-
- /* Fill the allocated but "unused" space beyond the end of the buffer */
- while (gte < end)
- gen8_set_pte(gte++, vm->scratch[0]->encode);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void gen6_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- iowrite32(vm->pte_encode(addr, level, flags), pte);
-
- ggtt->invalidate(ggtt);
-}
-
-/*
- * Binds an object into the global gtt with the specified cache level.
- * The object will be accessible to the GPU via commands whose operands
- * reference offsets within the global GTT as well as accessible by the GPU
- * through the GMADR mapped BAR (i915->mm.gtt->gtt).
- */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *gte;
- gen6_pte_t __iomem *end;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- gte = (gen6_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
-
- for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- iowrite32(vm->pte_encode(addr, level, flags), gte++);
- GEM_BUG_ON(gte > end);
-
- /* Fill the allocated but "unused" space beyond the end of the buffer */
- while (gte < end)
- iowrite32(vm->scratch[0]->encode, gte++);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
-}
-
-struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
-};
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
- struct insert_page *arg = _arg;
-
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct insert_page arg = { vm, addr, offset, level };
-
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma_resource *vma_res;
- enum i915_cache_level level;
- u32 flags;
-};
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
-
- gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, vma_res, level, flags };
-
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = vm->scratch[0]->encode;
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void i915_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
- flags);
-}
-
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static void ggtt_bind_vma(struct i915_address_space *vm,
+void intel_ggtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
@@ -519,7 +242,7 @@ static void ggtt_bind_vma(struct i915_address_space *vm,
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
-static void ggtt_unbind_vma(struct i915_address_space *vm,
+void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
@@ -722,10 +445,10 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
ggtt->alias = ppgtt;
ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
- GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
- GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
@@ -748,8 +471,8 @@ static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
i915_vm_put(&ppgtt->vm);
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
}
int i915_init_ggtt(struct drm_i915_private *i915)
@@ -773,13 +496,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
struct i915_vma *vma, *vn;
- atomic_set(&ggtt->vm.open, 0);
-
flush_workqueue(ggtt->vm.i915->wq);
i915_gem_drain_freed_objects(ggtt->vm.i915);
mutex_lock(&ggtt->vm.mutex);
+ ggtt->vm.skip_pte_rewrite = true;
+
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
bool trylock;
@@ -837,364 +560,12 @@ void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
dma_resv_fini(&ggtt->vm._resv);
}
-static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
-}
-
-static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
- if (bdw_gmch_ctl)
- bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-
-#ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
- if (bdw_gmch_ctl > 4)
- bdw_gmch_ctl = 4;
-#endif
-
- return bdw_gmch_ctl << 20;
-}
-
-static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GGMS_MASK;
-
- if (gmch_ctrl)
- return 1 << (20 + gmch_ctrl);
-
- return 0;
-}
-
-static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
-{
- /*
- * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
- * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
- */
- GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
- return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
-}
-
-static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
-{
- return gen6_gttmmadr_size(i915) / 2;
-}
-
-static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- phys_addr_t phys_addr;
- u32 pte_flags;
- int ret;
-
- GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
-
- /*
- * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
- * will be dropped. For WC mappings in general we have 64 byte burst
- * writes when the WC buffer is flushed, so we can't use it, but have to
- * resort to an uncached mapping. The WC issue is easily caught by the
- * readback check when writing GTT PTE entries.
- */
- if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
- ggtt->gsm = ioremap(phys_addr, size);
- else
- ggtt->gsm = ioremap_wc(phys_addr, size);
- if (!ggtt->gsm) {
- drm_err(&i915->drm, "Failed to map the ggtt page table\n");
- return -ENOMEM;
- }
-
- kref_init(&ggtt->vm.resv_ref);
- ret = setup_scratch_page(&ggtt->vm);
- if (ret) {
- drm_err(&i915->drm, "Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(ggtt->gsm);
- return ret;
- }
-
- pte_flags = 0;
- if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
- pte_flags |= PTE_LM;
-
- ggtt->vm.scratch[0]->encode =
- ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
- I915_CACHE_NONE, pte_flags);
-
- return 0;
-}
-
-static void gen6_gmch_remove(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-
- iounmap(ggtt->gsm);
- free_scratch(vm);
-}
-
-static struct resource pci_resource(struct pci_dev *pdev, int bar)
+struct resource intel_pci_resource(struct pci_dev *pdev, int bar)
{
return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
}
-static int gen8_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- unsigned int size;
- u16 snb_gmch_ctl;
-
- /* TODO: We're not aware of mappable constraints on gen8 yet */
- if (!HAS_LMEM(i915)) {
- ggtt->gmadr = pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
- }
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(i915))
- size = chv_get_total_gtt_size(snb_gmch_ctl);
- else
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
- ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
-
- ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.cleanup = gen6_gmch_remove;
- ggtt->vm.insert_page = gen8_ggtt_insert_page;
- ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
-
- ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
-
- /*
- * Serialize GTT updates with aperture access on BXT if VT-d is on,
- * and always on CHV.
- */
- if (intel_vm_no_concurrent_access_wa(i915)) {
- ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- ggtt->vm.bind_async_flags =
- I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- }
-
- ggtt->invalidate = gen8_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
- ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
-
- setup_private_pat(ggtt->vm.gt->uncore);
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static u64 snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- pte |= GEN7_PTE_CACHE_L3_LLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- if (!(flags & PTE_READ_ONLY))
- pte |= BYT_PTE_WRITEABLE;
-
- if (level != I915_CACHE_NONE)
- pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
-
- return pte;
-}
-
-static u64 hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- if (level != I915_CACHE_NONE)
- pte |= HSW_WB_LLC_AGE3;
-
- return pte;
-}
-
-static u64 iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_NONE:
- break;
- case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE3;
- break;
- default:
- pte |= HSW_WB_ELLC_LLC_AGE3;
- break;
- }
-
- return pte;
-}
-
-static int gen6_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- unsigned int size;
- u16 snb_gmch_ctl;
-
- ggtt->gmadr = pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
- /*
- * 64/512MB is the current min/max we actually know of, but this is
- * just a coarse sanity check.
- */
- if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
- &ggtt->mappable_end);
- return -ENXIO;
- }
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
- size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
-
- ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
- ggtt->vm.insert_page = gen6_ggtt_insert_page;
- ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
- ggtt->vm.cleanup = gen6_gmch_remove;
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- if (HAS_EDRAM(i915))
- ggtt->vm.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(i915))
- ggtt->vm.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(i915))
- ggtt->vm.pte_encode = byt_pte_encode;
- else if (GRAPHICS_VER(i915) >= 7)
- ggtt->vm.pte_encode = ivb_pte_encode;
- else
- ggtt->vm.pte_encode = snb_pte_encode;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
- intel_gmch_remove();
-}
-
-static int i915_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- phys_addr_t gmadr_base;
- int ret;
-
- ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
- if (!ret) {
- drm_err(&i915->drm, "failed to set up gmch\n");
- return -EIO;
- }
-
- intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
-
- ggtt->gmadr =
- (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
-
- if (needs_idle_maps(i915)) {
- drm_notice(&i915->drm,
- "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
- ggtt->do_idle_maps = true;
- }
-
- ggtt->vm.insert_page = i915_ggtt_insert_page;
- ggtt->vm.insert_entries = i915_ggtt_insert_entries;
- ggtt->vm.clear_range = i915_ggtt_clear_range;
- ggtt->vm.cleanup = i915_gmch_remove;
-
- ggtt->invalidate = gmch_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
- if (unlikely(ggtt->do_idle_maps))
- drm_notice(&i915->drm,
- "Applying Ironlake quirks for intel_iommu\n");
-
- return 0;
-}
-
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -1206,11 +577,11 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
dma_resv_init(&ggtt->vm._resv);
if (GRAPHICS_VER(i915) <= 5)
- ret = i915_gmch_probe(ggtt);
+ ret = intel_gt_gmch_gen5_probe(ggtt);
else if (GRAPHICS_VER(i915) < 8)
- ret = gen6_gmch_probe(ggtt);
+ ret = intel_gt_gmch_gen6_probe(ggtt);
else
- ret = gen8_gmch_probe(ggtt);
+ ret = intel_gt_gmch_gen8_probe(ggtt);
if (ret) {
dma_resv_fini(&ggtt->vm._resv);
return ret;
@@ -1256,7 +627,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
if (ret)
return ret;
- if (intel_vtd_active(i915))
+ if (i915_vtd_active(i915))
drm_info(&i915->drm, "VT-d active for gfx access\n");
return 0;
@@ -1264,10 +635,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
- if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
- return -EIO;
-
- return 0;
+ return intel_gt_gmch_gen5_enable_hw(i915);
}
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
@@ -1307,16 +675,12 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool write_domain_objs = false;
- int open;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
/* First fill our portion of the GTT with scratch pages */
vm->clear_range(vm, 0, vm->total);
- /* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&vm->open, 0);
-
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -1333,8 +697,6 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
}
}
- atomic_set(&vm->open, open);
-
return write_domain_objs;
}
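
[Annotation] The resume path above no longer parks the VM by zeroing and restoring the atomic vm->open count; teardown now sets an explicit vm.skip_pte_rewrite flag instead (see the ggtt_cleanup_hw hunk earlier in this file). A minimal sketch of the flag-based pattern — types are simplified and the toy_* names are made up; only skip_pte_rewrite corresponds to a real field:

/* Sketch only: suppress PTE rewrites during teardown via a flag
 * rather than an atomic open count.
 */
struct toy_vm {
	bool skip_pte_rewrite;		/* set once teardown begins */
};

static void toy_unbind(struct toy_vm *vm)
{
	if (vm->skip_pte_rewrite)
		return;			/* device going away: leave PTEs alone */
	/* ... otherwise rewrite the PTEs to point at scratch ... */
}
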
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 76880fb8fc19..6ebda3d65086 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -3,6 +3,8 @@
* Copyright © 2008-2015 Intel Corporation
*/
+#include <linux/highmem.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index d112ffd56418..556bca3be804 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -39,6 +39,8 @@
#define MI_GLOBAL_GTT (1<<22)
#define MI_NOOP MI_INSTR(0, 0)
+#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
+#define MI_SET_PREDICATE_DISABLE (0 << 0)
#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
@@ -134,6 +136,13 @@
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
#define MI_USE_GGTT (1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_ATOMIC MI_INSTR(0x2f, 1)
+#define MI_ATOMIC_INLINE (MI_INSTR(0x2f, 9) | MI_ATOMIC_INLINE_DATA)
+#define MI_ATOMIC_GLOBAL_GTT (1 << 22)
+#define MI_ATOMIC_INLINE_DATA (1 << 18)
+#define MI_ATOMIC_CS_STALL (1 << 17)
+#define MI_ATOMIC_MOVE (0x4 << 8)
+
/*
* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
* - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
@@ -144,6 +153,7 @@
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
#define MI_LRI_LRM_CS_MMIO REG_BIT(19)
+#define MI_LRI_MMIO_REMAP_EN REG_BIT(17)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
@@ -153,8 +163,10 @@
#define MI_FLUSH_DW_PROTECTED_MEM_EN (1 << 22)
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_CCS (1<<16)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
#define MI_FLUSH_DW_OP_MASK (3<<14)
+#define MI_FLUSH_DW_LLC (1<<9)
#define MI_FLUSH_DW_NOTIFY (1<<8)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
@@ -203,8 +215,27 @@
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define XY_CTRL_SURF_INSTR_SIZE 5
+#define MI_FLUSH_DW_SIZE 3
+#define XY_CTRL_SURF_COPY_BLT ((2 << 29) | (0x48 << 22) | 3)
+#define SRC_ACCESS_TYPE_SHIFT 21
+#define DST_ACCESS_TYPE_SHIFT 20
+#define CCS_SIZE_MASK 0x3FF
+#define CCS_SIZE_SHIFT 8
+#define XY_CTRL_SURF_MOCS_MASK GENMASK(31, 25)
+#define NUM_CCS_BYTES_PER_BLOCK 256
+#define NUM_BYTES_PER_CCS_BYTE 256
+#define NUM_CCS_BLKS_PER_XFER 1024
+#define INDIRECT_ACCESS 0
+#define DIRECT_ACCESS 1
+
#define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2))
#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
+#define XY_FAST_COLOR_BLT_CMD (2 << 29 | 0x44 << 22)
+#define XY_FAST_COLOR_BLT_DEPTH_32 (2 << 19)
+#define XY_FAST_COLOR_BLT_DW 16
+#define XY_FAST_COLOR_BLT_MOCS_MASK GENMASK(27, 21)
+#define XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31
#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
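
[Annotation] The CCS constants added above encode a fixed 1:256 ratio between main-surface bytes and compression-control bytes (NUM_BYTES_PER_CCS_BYTE), with the CCS itself handled in 256-byte blocks (NUM_CCS_BYTES_PER_BLOCK). A worked sizing example, for illustration only (the helper name is made up):

/* One CCS byte covers 256 main-surface bytes, so a 4 MiB surface
 * needs 4 MiB / 256 = 16 KiB of CCS, i.e. 64 blocks of
 * NUM_CCS_BYTES_PER_BLOCK (256) bytes each.
 */
static unsigned long ccs_bytes_for_surface(unsigned long surface_bytes)
{
	return surface_bytes / NUM_BYTES_PER_CCS_BYTE;
}
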
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
new file mode 100644
index 000000000000..0e494028b81d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/irq.h>
+#include <linux/mei_aux.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "gt/intel_gsc.h"
+#include "gt/intel_gt.h"
+
+#define GSC_BAR_LENGTH 0x00000FFC
+
+static void gsc_irq_mask(struct irq_data *d)
+{
+ /* generic irq handling */
+}
+
+static void gsc_irq_unmask(struct irq_data *d)
+{
+ /* generic irq handling */
+}
+
+static struct irq_chip gsc_irq_chip = {
+ .name = "gsc_irq_chip",
+ .irq_mask = gsc_irq_mask,
+ .irq_unmask = gsc_irq_unmask,
+};
+
+static int gsc_irq_init(int irq)
+{
+ irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
+ handle_simple_irq, "gsc_irq_handler");
+
+ return irq_set_chip_data(irq, NULL);
+}
+
+struct gsc_def {
+ const char *name;
+ unsigned long bar;
+ size_t bar_size;
+};
+
+/* gsc resources and definitions (HECI1 and HECI2) */
+static const struct gsc_def gsc_def_dg1[] = {
+ {
+ /* HECI1 not yet implemented. */
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG1_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ }
+};
+
+static const struct gsc_def gsc_def_dg2[] = {
+ {
+ .name = "mei-gsc",
+ .bar = DG2_GSC_HECI1_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG2_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ }
+};
+
+static void gsc_release_dev(struct device *dev)
+{
+ struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
+ struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
+
+ kfree(adev);
+}
+
+static void gsc_destroy_one(struct intel_gsc_intf *intf)
+{
+ if (intf->adev) {
+ auxiliary_device_delete(&intf->adev->aux_dev);
+ auxiliary_device_uninit(&intf->adev->aux_dev);
+ intf->adev = NULL;
+ }
+ if (intf->irq >= 0)
+ irq_free_desc(intf->irq);
+ intf->irq = -1;
+}
+
+static void gsc_init_one(struct drm_i915_private *i915,
+ struct intel_gsc_intf *intf,
+ unsigned int intf_id)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct mei_aux_device *adev;
+ struct auxiliary_device *aux_dev;
+ const struct gsc_def *def;
+ int ret;
+
+ intf->irq = -1;
+ intf->id = intf_id;
+
+ if (intf_id == 0 && !HAS_HECI_PXP(i915))
+ return;
+
+ if (IS_DG1(i915)) {
+ def = &gsc_def_dg1[intf_id];
+ } else if (IS_DG2(i915)) {
+ def = &gsc_def_dg2[intf_id];
+ } else {
+ drm_warn_once(&i915->drm, "Unknown platform\n");
+ return;
+ }
+
+ if (!def->name) {
+ drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
+ return;
+ }
+
+ intf->irq = irq_alloc_desc(0);
+ if (intf->irq < 0) {
+ drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
+ return;
+ }
+
+ ret = gsc_irq_init(intf->irq);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
+ goto fail;
+ }
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ goto fail;
+
+ adev->irq = intf->irq;
+ adev->bar.parent = &pdev->resource[0];
+ adev->bar.start = def->bar + pdev->resource[0].start;
+ adev->bar.end = adev->bar.start + def->bar_size - 1;
+ adev->bar.flags = IORESOURCE_MEM;
+ adev->bar.desc = IORES_DESC_NONE;
+
+ aux_dev = &adev->aux_dev;
+ aux_dev->name = def->name;
+ aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
+ PCI_DEVID(pdev->bus->number, pdev->devfn);
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = gsc_release_dev;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
+ kfree(adev);
+ goto fail;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
+ /* adev will be freed with the put_device() and .release sequence */
+ auxiliary_device_uninit(aux_dev);
+ goto fail;
+ }
+ intf->adev = adev;
+
+ return;
+fail:
+ gsc_destroy_one(intf);
+}
+
+static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
+{
+ int ret;
+
+ if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
+ drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
+ return;
+ }
+
+ if (!HAS_HECI_GSC(gt->i915)) {
+ drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
+ return;
+ }
+
+ if (gt->gsc.intf[intf_id].irq < 0) {
+ drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set");
+ return;
+ }
+
+ ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
+ if (ret)
+ drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
+}
+
+void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
+{
+ if (iir & GSC_IRQ_INTF(0))
+ gsc_irq_handler(gt, 0);
+ if (iir & GSC_IRQ_INTF(1))
+ gsc_irq_handler(gt, 1);
+}
+
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
+{
+ unsigned int i;
+
+ if (!HAS_HECI_GSC(i915))
+ return;
+
+ for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
+ gsc_init_one(i915, &gsc->intf[i], i);
+}
+
+void intel_gsc_fini(struct intel_gsc *gsc)
+{
+ struct intel_gt *gt = gsc_to_gt(gsc);
+ unsigned int i;
+
+ if (!HAS_HECI_GSC(gt->i915))
+ return;
+
+ for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
+ gsc_destroy_one(&gsc->intf[i]);
+}
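
[Annotation] Note how gsc_init_one() composes the auxiliary device id: the PCI domain occupies the high 16 bits and PCI_DEVID() packs bus and devfn into the low 16, giving each GSC interface a stable, unique name on the auxiliary bus. An illustrative expansion (the helper name is made up):

/* PCI_DEVID(bus, devfn) is ((bus) << 8) | (devfn), so for domain 0,
 * bus 0x03, devfn 0x00 the id becomes (0 << 16) | 0x0300 = 0x0300.
 */
static u32 example_gsc_aux_id(u16 domain, u8 bus, u8 devfn)
{
	return ((u32)domain << 16) | ((u32)bus << 8) | devfn;
}
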
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
new file mode 100644
index 000000000000..68582f912b21
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
+ */
+#ifndef __INTEL_GSC_DEV_H__
+#define __INTEL_GSC_DEV_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_gt;
+struct mei_aux_device;
+
+#define INTEL_GSC_NUM_INTERFACES 2
+/*
+ * The HECI1 bit corresponds to bit 15 and HECI2 to bit 14.
+ * The reason for this is to allow growth for more interfaces in the future.
+ */
+#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
+
+/**
+ * struct intel_gsc - graphics security controller
+ * @intf: gsc interface
+ */
+struct intel_gsc {
+ struct intel_gsc_intf {
+ struct mei_aux_device *adev;
+ int irq;
+ unsigned int id;
+ } intf[INTEL_GSC_NUM_INTERFACES];
+};
+
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv);
+void intel_gsc_fini(struct intel_gsc *gsc);
+void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir);
+
+#endif /* __INTEL_GSC_DEV_H__ */
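
[Annotation] GSC_IRQ_INTF() counts down from bit 15 so that additional interfaces can claim lower bits later. Expanded for the two current interfaces:

/* GSC_IRQ_INTF(_x) = BIT(15 - (_x)):
 *   GSC_IRQ_INTF(0) == BIT(15) == 0x8000   (HECI1)
 *   GSC_IRQ_INTF(1) == BIT(14) == 0x4000   (HECI2)
 * so the combined mask programmed by the IRQ code is 0xc000.
 */
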
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 8a2483ccbfb9..53307ca0eed0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -4,7 +4,6 @@
*/
#include <drm/drm_managed.h>
-#include <drm/intel-gtt.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
@@ -17,6 +16,7 @@
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
+#include "intel_gt_gmch.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
@@ -26,10 +26,11 @@
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
+#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+static void __intel_gt_init_early(struct intel_gt *gt)
{
spin_lock_init(&gt->irq_lock);
@@ -51,17 +52,23 @@ void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_rps_init_early(&gt->rps);
}
-void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+/* Preliminary initialization of Tile 0 */
+void intel_root_gt_init_early(struct drm_i915_private *i915)
{
+ struct intel_gt *gt = to_gt(i915);
+
gt->i915 = i915;
gt->uncore = &i915->uncore;
+
+ __intel_gt_init_early(gt);
}
-int intel_gt_probe_lmem(struct intel_gt *gt)
+static int intel_gt_probe_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
+ unsigned int instance = gt->info.id;
+ int id = INTEL_REGION_LMEM_0 + instance;
struct intel_memory_region *mem;
- int id;
int err;
mem = intel_gt_setup_lmem(gt);
@@ -76,9 +83,8 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
return err;
}
- id = INTEL_REGION_LMEM;
-
mem->id = id;
+ mem->instance = instance;
intel_memory_region_set_name(mem, "local%u", mem->instance);
@@ -96,6 +102,12 @@ int intel_gt_assign_ggtt(struct intel_gt *gt)
return gt->ggtt ? 0 : -ENOMEM;
}
+static const char * const intel_steering_types[] = {
+ "L3BANK",
+ "MSLICE",
+ "LNCF",
+};
+
static const struct intel_mmio_range icl_l3bank_steering_table[] = {
{ 0x00B100, 0x00B3FF },
{},
@@ -439,14 +451,17 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
{
wmb();
if (GRAPHICS_VER(gt->i915) < 6)
- intel_gtt_chipset_flush();
+ intel_gt_gmch_gen5_chipset_flush(gt);
}
void intel_gt_driver_register(struct intel_gt *gt)
{
+ intel_gsc_init(&gt->gsc, gt->i915);
+
intel_rps_driver_register(&gt->rps);
intel_gt_debugfs_register(gt);
+ intel_gt_sysfs_register(gt);
}
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -712,6 +727,11 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_uc_init;
+ err = intel_gt_init_hwconfig(gt);
+ if (err)
+ drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
+ ERR_PTR(err));
+
err = __engines_record_defaults(gt);
if (err)
goto err_gt;
@@ -766,6 +786,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_wakeref_t wakeref;
intel_rps_driver_unregister(&gt->rps);
+ intel_gsc_fini(&gt->gsc);
intel_pxp_fini(&gt->pxp);
@@ -793,18 +814,24 @@ void intel_gt_driver_release(struct intel_gt *gt)
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
intel_gt_fini_buffer_pool(gt);
+ intel_gt_fini_hwconfig(gt);
}
-void intel_gt_driver_late_release(struct intel_gt *gt)
+void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
+ struct intel_gt *gt;
+ unsigned int id;
+
/* We need to wait for inflight RCU frees to release their grip */
rcu_barrier();
- intel_uc_driver_late_release(&gt->uc);
- intel_gt_fini_requests(gt);
- intel_gt_fini_reset(gt);
- intel_gt_fini_timelines(gt);
- intel_engines_free(gt);
+ for_each_gt(gt, i915, id) {
+ intel_uc_driver_late_release(&gt->uc);
+ intel_gt_fini_requests(gt);
+ intel_gt_fini_reset(gt);
+ intel_gt_fini_timelines(gt);
+ intel_engines_free(gt);
+ }
}
/**
@@ -913,6 +940,35 @@ u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg)
return intel_uncore_read_fw(gt->uncore, reg);
}
+/**
+ * intel_gt_get_valid_steering_for_reg - get a valid steering for a register
+ * @gt: GT structure
+ * @reg: register for which the steering is required
+ * @sliceid: return variable for slice steering
+ * @subsliceid: return variable for subslice steering
+ *
+ * This function returns a slice/subslice pair that is guaranteed to work for
+ * read steering of the given register. Note that a value will be returned even
+ * if the register is not replicated and therefore does not actually require
+ * steering.
+ */
+void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg,
+ u8 *sliceid, u8 *subsliceid)
+{
+ int type;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
+ intel_gt_get_valid_steering(gt, type, sliceid,
+ subsliceid);
+ return;
+ }
+ }
+
+ *sliceid = gt->default_steering.groupid;
+ *subsliceid = gt->default_steering.instanceid;
+}
+
u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg)
{
int type;
@@ -932,6 +988,145 @@ u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg)
return intel_uncore_read(gt->uncore, reg);
}
+static void report_steering_type(struct drm_printer *p,
+ struct intel_gt *gt,
+ enum intel_steering_type type,
+ bool dump_table)
+{
+ const struct intel_mmio_range *entry;
+ u8 slice, subslice;
+
+ BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
+
+ if (!gt->steering_table[type]) {
+ drm_printf(p, "%s steering: uses default steering\n",
+ intel_steering_types[type]);
+ return;
+ }
+
+ intel_gt_get_valid_steering(gt, type, &slice, &subslice);
+ drm_printf(p, "%s steering: sliceid=0x%x, subsliceid=0x%x\n",
+ intel_steering_types[type], slice, subslice);
+
+ if (!dump_table)
+ return;
+
+ for (entry = gt->steering_table[type]; entry->end; entry++)
+ drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
+}
+
+void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table)
+{
+ drm_printf(p, "Default steering: sliceid=0x%x, subsliceid=0x%x\n",
+ gt->default_steering.groupid,
+ gt->default_steering.instanceid);
+
+ if (HAS_MSLICES(gt->i915)) {
+ report_steering_type(p, gt, MSLICE, dump_table);
+ report_steering_type(p, gt, LNCF, dump_table);
+ }
+}
+
+static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
+{
+ int ret;
+
+ if (!gt_is_root(gt)) {
+ struct intel_uncore_mmio_debug *mmio_debug;
+ struct intel_uncore *uncore;
+
+ uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
+ if (!mmio_debug) {
+ kfree(uncore);
+ return -ENOMEM;
+ }
+
+ gt->uncore = uncore;
+ gt->uncore->debug = mmio_debug;
+
+ __intel_gt_init_early(gt);
+ }
+
+ intel_uncore_init_early(gt->uncore, gt);
+
+ ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
+ if (ret)
+ return ret;
+
+ gt->phys_addr = phys_addr;
+
+ return 0;
+}
+
+static void
+intel_gt_tile_cleanup(struct intel_gt *gt)
+{
+ intel_uncore_cleanup_mmio(gt->uncore);
+
+ if (!gt_is_root(gt)) {
+ kfree(gt->uncore->debug);
+ kfree(gt->uncore);
+ kfree(gt);
+ }
+}
+
+int intel_gt_probe_all(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_gt *gt = &i915->gt0;
+ phys_addr_t phys_addr;
+ unsigned int mmio_bar;
+ int ret;
+
+ mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
+ phys_addr = pci_resource_start(pdev, mmio_bar);
+
+ /*
+ * We always have at least one primary GT on any device
+ * and it has already been initialized early during probe
+ * in i915_driver_probe()
+ */
+ ret = intel_gt_tile_setup(gt, phys_addr);
+ if (ret)
+ return ret;
+
+ i915->gt[0] = gt;
+
+ /* TODO: add more tiles */
+ return 0;
+}
+
+int intel_gt_tiles_init(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ unsigned int id;
+ int ret;
+
+ for_each_gt(gt, i915, id) {
+ ret = intel_gt_probe_lmem(gt);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_gt_release_all(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, i915, id) {
+ intel_gt_tile_cleanup(gt);
+ i915->gt[id] = NULL;
+ }
+}
+
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p)
{
@@ -980,6 +1175,7 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
[VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
[VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
[COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR,
};
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
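
[Annotation] For the steering helper added above, a hypothetical caller would look roughly like the sketch below; intel_gt_read_register() already resolves steering internally, so this is purely illustrative and example_steered_read is a made-up name:

static u32 example_steered_read(struct intel_gt *gt, i915_reg_t reg)
{
	u8 slice, subslice;

	/* always yields a slice/subslice pair that is safe to use,
	 * falling back to the default steering for unreplicated regs */
	intel_gt_get_valid_steering_for_reg(gt, reg, &slice, &subslice);

	/* the real driver programs slice/subslice into the steering
	 * control register before issuing the MMIO read */
	return intel_gt_read_register(gt, reg);
}
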
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 0f571c8ee22b..44c6cb63ccbc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -13,12 +13,24 @@
struct drm_i915_private;
struct drm_printer;
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct i915_vma_resource *vma_res;
+ enum i915_cache_level level;
+ u32 flags;
+};
+
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
##__VA_ARGS__); \
} while (0)
+static inline bool gt_is_root(struct intel_gt *gt)
+{
+ return !gt->info.id;
+}
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -34,10 +46,13 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
return container_of(huc, struct intel_gt, uc.huc);
}
-void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
+static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
+{
+ return container_of(gsc, struct intel_gt, gsc);
+}
+
+void intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
-int intel_gt_probe_lmem(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
@@ -47,7 +62,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
-void intel_gt_driver_late_release(struct intel_gt *gt);
+void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
@@ -84,9 +99,25 @@ static inline bool intel_gt_needs_read_steering(struct intel_gt *gt,
return gt->steering_table[type];
}
+void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg,
+ u8 *sliceid, u8 *subsliceid);
+
u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg);
u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg);
+void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table);
+
+int intel_gt_probe_all(struct drm_i915_private *i915);
+int intel_gt_tiles_init(struct drm_i915_private *i915);
+void intel_gt_release_all(struct drm_i915_private *i915);
+
+#define for_each_gt(gt__, i915__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_MAX_GT; \
+ (id__)++) \
+ for_each_if(((gt__) = (i915__)->gt[(id__)]))
+
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p);
@@ -94,4 +125,6 @@ void intel_gt_watchdog_work(struct work_struct *work);
void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+struct resource intel_pci_resource(struct pci_dev *pdev, int bar);
+
#endif /* __INTEL_GT_H__ */
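
[Annotation] The new for_each_gt() iterates the fixed-size gt[] array but skips NULL slots via for_each_if(), so it is safe to use before every tile is populated. A usage sketch (the wrapper function is made up; the iteration matches the call sites in intel_gt.c above):

static void example_walk_tiles(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	for_each_gt(gt, i915, id)	/* visits only initialized tiles */
		drm_dbg(&i915->drm, "tile %u present\n", id);
}
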
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 0db822c3b7e5..d5d1b04dbcad 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -161,6 +161,10 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
if (gt->clock_frequency)
gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+ /* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
+ if (GRAPHICS_VER(gt->i915) == 11)
+ gt->clock_period_ns = NSEC_PER_SEC / 13750000;
+
GT_TRACE(gt,
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
gt->clock_frequency / 1000,
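
[Annotation] The Icelake override just added works out as follows:

/* NSEC_PER_SEC / 13750000 = 1,000,000,000 / 13,750,000 = 72 (truncated),
 * i.e. a 13.75 MHz CTX_TIMESTAMP clock, whose true period is ~72.7 ns,
 * is recorded as a 72 ns clock_period_ns by the integer division.
 */
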
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index f103664b71d4..d886fdc2c694 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -6,6 +6,7 @@
#include <linux/debugfs.h>
#include "i915_drv.h"
+#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
#include "intel_gt_pm_debugfs.h"
@@ -29,7 +30,7 @@ int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
}
}
-int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
+void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
{
/* Flush any previous reset before applying for a new one */
wait_event(gt->reset.queue,
@@ -37,7 +38,6 @@ int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE,
"Manually reset engine mask to %llx", val);
- return 0;
}
/*
@@ -51,16 +51,30 @@ static int __intel_gt_debugfs_reset_show(void *data, u64 *val)
static int __intel_gt_debugfs_reset_store(void *data, u64 val)
{
- return intel_gt_debugfs_reset_store(data, val);
+ intel_gt_debugfs_reset_store(data, val);
+
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show,
__intel_gt_debugfs_reset_store, "%llu\n");
+static int steering_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct intel_gt *gt = m->private;
+
+ intel_gt_report_steering(&p, gt, true);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(steering);
+
static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "reset", &reset_fops, NULL },
+ { "steering", &steering_fops },
};
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
index 17e79b735cfe..e4110eebf093 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
@@ -48,6 +48,6 @@ void intel_gt_debugfs_register_files(struct dentry *root,
/* functions that need to be accessed by the upper level non-gt interfaces */
int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val);
-int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val);
+void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val);
#endif /* INTEL_GT_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.c b/drivers/gpu/drm/i915/gt/intel_gt_gmch.c
new file mode 100644
index 000000000000..18e488672d1b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_gmch.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/intel-gtt.h>
+#include <drm/i915_drm.h>
+
+#include <linux/agp_backend.h>
+#include <linux/stop_machine.h>
+
+#include "i915_drv.h"
+#include "intel_gt_gmch.h"
+#include "intel_gt_regs.h"
+#include "intel_gt.h"
+#include "i915_utils.h"
+
+#include "gen8_ppgtt.h"
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+static void gen5_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen5_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
+ flags);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the CPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *gte;
+ gen6_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ gte = (gen6_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ iowrite32(vm->scratch[0]->encode, gte++);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *gte;
+ gen8_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ gte = (gen8_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ gen8_set_pte(gte++, pte_encode | addr);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct insert_entries arg = { vm, vma_res, level, flags };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
+{
+ intel_gtt_chipset_flush();
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ intel_gtt_chipset_flush();
+}
+
+static void gen5_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->scratch[0]->encode;
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void gen5_gmch_remove(struct i915_address_space *vm)
+{
+ intel_gmch_remove();
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ free_scratch(vm);
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+ /*
+ * Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (!i915_vtd_active(i915))
+ return false;
+
+ if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
+ return true;
+
+ if (GRAPHICS_VER(i915) == 12)
+ return true; /* XXX DMAR fault reason 7 */
+
+ return false;
+}
+
+static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
+{
+ /*
+ * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
+ * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
+ */
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
+ return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
+}
+
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
+
+static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
+{
+ return gen6_gttmmadr_size(i915) / 2;
+}
+
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ phys_addr_t phys_addr;
+ u32 pte_flags;
+ int ret;
+
+ GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
+
+ /*
+ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+ ggtt->gsm = ioremap(phys_addr, size);
+ else
+ ggtt->gsm = ioremap_wc(phys_addr, size);
+ if (!ggtt->gsm) {
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ return -ENOMEM;
+ }
+
+ kref_init(&ggtt->vm.resv_ref);
+ ret = setup_scratch_page(&ggtt->vm);
+ if (ret) {
+ drm_err(&i915->drm, "Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(ggtt->gsm);
+ return ret;
+ }
+
+ pte_flags = 0;
+ if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
+ pte_flags |= PTE_LM;
+
+ ggtt->vm.scratch[0]->encode =
+ ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
+ I915_CACHE_NONE, pte_flags);
+
+ return 0;
+}
+
+int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ phys_addr_t gmadr_base;
+ int ret;
+
+ ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
+ if (!ret) {
+ drm_err(&i915->drm, "failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+ ggtt->gmadr =
+ (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
+ ggtt->vm.insert_page = gen5_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen5_ggtt_insert_entries;
+ ggtt->vm.clear_range = gen5_ggtt_clear_range;
+ ggtt->vm.cleanup = gen5_gmch_remove;
+
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ if (unlikely(ggtt->do_idle_maps))
+ drm_notice(&i915->drm,
+ "Applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ ggtt->gmadr = intel_pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+ /*
+ * 64/512MB is the current min/max we actually know of, but this is
+ * just a coarse sanity check.
+ */
+ if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
+ drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+ &ggtt->mappable_end);
+ return -ENXIO;
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
+ if (HAS_EDRAM(i915))
+ ggtt->vm.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(i915))
+ ggtt->vm.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(i915))
+ ggtt->vm.pte_encode = byt_pte_encode;
+ else if (GRAPHICS_VER(i915) >= 7)
+ ggtt->vm.pte_encode = ivb_pte_encode;
+ else
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
+int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ /* TODO: We're not aware of mappable constraints on gen8 yet */
+ if (!HAS_LMEM(i915)) {
+ ggtt->gmadr = intel_pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (IS_CHERRYVIEW(i915))
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
+ else
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
+
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+ /*
+ * Serialize GTT updates with aperture access on BXT if VT-d is on,
+ * and always on CHV.
+ */
+ if (intel_vm_no_concurrent_access_wa(i915)) {
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ ggtt->vm.bind_async_flags =
+ I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ }
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
+{
+ if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
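
[Annotation] The three probes above all convert a PTE-table size (decoded from the GMCH control word, in bytes) into an address-space size with the same formula: total = (table bytes / PTE size) * page size. Worked through with concrete numbers in a sketch (the helper name is made up):

/* gen6: 4-byte PTEs, so a 2 MiB table maps (2 MiB / 4) * 4 KiB = 2 GiB.
 * gen8: 8-byte PTEs, so the same table maps (2 MiB / 8) * 4 KiB = 1 GiB.
 */
static u64 example_ggtt_total(u64 table_bytes, size_t pte_size)
{
	return (table_bytes / pte_size) * I915_GTT_PAGE_SIZE;
}
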
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.h b/drivers/gpu/drm/i915/gt/intel_gt_gmch.h
new file mode 100644
index 000000000000..75ed55c1f30a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_gmch.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_GMCH_H__
+#define __INTEL_GT_GMCH_H__
+
+#include "intel_gtt.h"
+
+/* For x86 platforms */
+#if IS_ENABLED(CONFIG_X86)
+void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt);
+int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt);
+int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt);
+int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt);
+int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915);
+
+/* Stubs for non-x86 platforms */
+#else
+static inline void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
+{
+}
+static inline int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
+{
+ /* No HW should be probed for this case yet, return fail */
+ return -ENODEV;
+}
+static inline int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
+{
+ /* No HW should be probed for this case yet, return fail */
+ return -ENODEV;
+}
+static inline int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
+{
+ /* No HW should be probed for this case yet, return fail */
+ return -ENODEV;
+}
+static inline int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
+{
+ /* No HW should be enabled for this case yet, return fail */
+ return -ENODEV;
+}
+#endif
+
+#endif /* __INTEL_GT_GMCH_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index e443ac4c8059..88b4becfcb17 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -68,6 +68,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
if (instance == OTHER_KCR_INSTANCE)
return intel_pxp_irq_handler(&gt->pxp, iir);
+ if (instance == OTHER_GSC_INSTANCE)
+ return intel_gsc_irq_handler(gt, iir);
+
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
}
@@ -184,6 +187,8 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
if (CCS_MASK(gt))
intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0);
/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
@@ -201,6 +206,8 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0);
if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
@@ -215,6 +222,7 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
u32 irqs = GT_RENDER_USER_INTERRUPT;
+ const u32 gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1);
u32 dmask;
u32 smask;
@@ -233,6 +241,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
if (CCS_MASK(gt))
intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE,
+ gsc_mask);
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
@@ -250,6 +261,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index c0fa41e4c803..f553e2173bda 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/string_helpers.h>
#include <linux/suspend.h>
#include "i915_drv.h"
@@ -128,7 +129,14 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+ /*
+ * We access the runtime_pm structure via gt->i915 here rather than
+ * gt->uncore as we do elsewhere in the file because gt->uncore is not
+ * yet initialized for all tiles at this point in the driver startup.
+ * runtime_pm is per-device rather than per-tile, so this is still the
+ * correct structure.
+ */
+ intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
@@ -157,7 +165,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GT_TRACE(gt, "force:%s", yesno(force));
+ GT_TRACE(gt, "force:%s", str_yes_no(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -174,15 +182,16 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
if (intel_gt_is_wedged(gt))
intel_gt_unset_wedged(gt);
- for_each_engine(engine, gt, id)
+ /* For GuC mode, ensure submission is disabled before stopping ring */
+ intel_uc_reset_prepare(&gt->uc);
+
+ for_each_engine(engine, gt, id) {
if (engine->reset.prepare)
engine->reset.prepare(engine);
- intel_uc_reset_prepare(&gt->uc);
-
- for_each_engine(engine, gt, id)
if (engine->sanitize)
engine->sanitize(engine);
+ }
if (reset_engines(gt) || force) {
for_each_engine(engine, gt, id)
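
[Annotation] The reordering in gt_sanitize() above matters for GuC mode: submission must be quiesced before any engine's ring is stopped. Schematically:

/* old: for_each_engine { prepare }; uc_reset_prepare(); for_each_engine { sanitize }
 * new: uc_reset_prepare(); for_each_engine { prepare; sanitize }
 *
 * i.e. GuC can no longer submit to an engine whose ring is being stopped.
 */
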
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 37765919fe32..0c6b9eb724ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -5,6 +5,7 @@
*/
#include <linux/seq_file.h>
+#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -23,38 +24,38 @@
#include "intel_uncore.h"
#include "vlv_sideband.h"
-int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
+void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
atomic_inc(&gt->user_wakeref);
intel_gt_pm_get(gt);
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
-
- return 0;
}
-int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
+void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_put(gt->uncore);
intel_gt_pm_put(gt);
atomic_dec(&gt->user_wakeref);
-
- return 0;
}
static int forcewake_user_open(struct inode *inode, struct file *file)
{
struct intel_gt *gt = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_open(gt);
+ intel_gt_pm_debugfs_forcewake_user_open(gt);
+
+ return 0;
}
static int forcewake_user_release(struct inode *inode, struct file *file)
{
struct intel_gt *gt = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_release(gt);
+ intel_gt_pm_debugfs_forcewake_user_release(gt);
+
+ return 0;
}
static const struct file_operations forcewake_user_fops = {
@@ -105,14 +106,14 @@ static int vlv_drpc(struct seq_file *m)
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+ str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
seq_printf(m, "Render Power Well: %s\n",
(pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
(pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
- print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
+ print_rc6_res(m, "Render RC6 residency since boot:", GEN6_GT_GFX_RC6);
print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
return fw_domains_show(m, NULL);
@@ -140,19 +141,19 @@ static int gen6_drpc(struct seq_file *m)
snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
seq_printf(m, "RC1e Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
if (GRAPHICS_VER(i915) >= 9) {
seq_printf(m, "Render Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+ str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
seq_printf(m, "Media Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+ str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
}
seq_printf(m, "Deep RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
seq_printf(m, "Deepest RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
seq_puts(m, "Current RC state: ");
switch (gt_core_status & GEN6_RCn_MASK) {
case GEN6_RC0:
@@ -176,7 +177,7 @@ static int gen6_drpc(struct seq_file *m)
}
seq_printf(m, "Core Power Down: %s\n",
- yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
if (GRAPHICS_VER(i915) >= 9) {
seq_printf(m, "Render Power Well: %s\n",
(gen9_powergate_status &
@@ -216,16 +217,17 @@ static int ilk_drpc(struct seq_file *m)
rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
- seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
+ seq_printf(m, "HD boost: %s\n",
+ str_yes_no(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
(rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
MEMMODE_BOOST_FREQ_SHIFT);
seq_printf(m, "HW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
+ str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN));
seq_printf(m, "SW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_SWMODE_EN));
+ str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN));
seq_printf(m, "Gated voltage change: %s\n",
- yesno(rgvmodectl & MEMMODE_RCLK_GATE));
+ str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE));
seq_printf(m, "Starting frequency: P%d\n",
(rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
seq_printf(m, "Max P-state: P%d\n",
@@ -234,7 +236,7 @@ static int ilk_drpc(struct seq_file *m)
seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
seq_printf(m, "Render standby enabled: %s\n",
- yesno(!(rstdbyctl & RCX_SW_EXIT)));
+ str_yes_no(!(rstdbyctl & RCX_SW_EXIT)));
seq_puts(m, "Current RS state: ");
switch (rstdbyctl & RSX_STATUS_MASK) {
case RSX_STATUS_ON:
@@ -307,12 +309,11 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
drm_printf(p, "Video Turbo Mode: %s\n",
- yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
drm_printf(p, "HW control enabled: %s\n",
- yesno(rpmodectl & GEN6_RP_ENABLE));
+ str_yes_no(rpmodectl & GEN6_RP_ENABLE));
drm_printf(p, "SW control enabled: %s\n",
- yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
+ str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
vlv_punit_get(i915);
freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
@@ -341,17 +342,16 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
} else if (GRAPHICS_VER(i915) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
- u32 rp_state_cap;
+ struct intel_rps_freq_caps caps;
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
u32 rpcurupei, rpcurup, rpprevup;
u32 rpcurdownei, rpcurdown, rpprevdown;
u32 rpupei, rpupt, rpdownei, rpdownt;
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
- int max_freq;
rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
- rp_state_cap = intel_rps_read_state_cap(rps);
+ gen6_rps_get_freq_caps(rps, &caps);
if (IS_GEN9_LP(i915))
gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
else
@@ -417,12 +417,11 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
drm_printf(p, "Video Turbo Mode: %s\n",
- yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
drm_printf(p, "HW control enabled: %s\n",
- yesno(rpmodectl & GEN6_RP_ENABLE));
+ str_yes_no(rpmodectl & GEN6_RP_ENABLE));
drm_printf(p, "SW control enabled: %s\n",
- yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
+ str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
@@ -474,25 +473,12 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
- max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
- rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_GEN9_BC(i915) ||
- GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
-
- max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_GEN9_BC(i915) ||
- GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
+ intel_gpu_freq(rps, caps.min_freq));
drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
-
- max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
- rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_GEN9_BC(i915) ||
- GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
+ intel_gpu_freq(rps, caps.rp1_freq));
drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
+ intel_gpu_freq(rps, caps.rp0_freq));
drm_printf(p, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(rps, rps->max_freq));
@@ -542,7 +528,7 @@ static int llc_show(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
- seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915)));
+ seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915)));
seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
i915->edram_size_mb);
@@ -604,10 +590,12 @@ static int rps_boost_show(struct seq_file *m, void *data)
struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &gt->rps;
- seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
- seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
+ seq_printf(m, "RPS enabled? %s\n",
+ str_yes_no(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n",
+ str_yes_no(intel_rps_is_active(rps)));
seq_printf(m, "GPU busy? %s, %llums\n",
- yesno(gt->awake),
+ str_yes_no(gt->awake),
ktime_to_ms(intel_gt_get_awake_time(gt)));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
index a8457887ec65..0ace8c2da0ac 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
@@ -14,7 +14,7 @@ void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root);
void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *m);
/* functions that need to be accessed by the upper level non-gt interfaces */
-int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt);
-int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt);
+void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt);
+void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt);
#endif /* INTEL_GT_PM_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 19cd34f24263..a0a49c16babd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -46,6 +46,7 @@
#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
+#define GEN11_MCR_MULTICAST REG_BIT(31)
#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27)
#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
@@ -840,6 +841,24 @@
#define CTC_SHIFT_PARAMETER_SHIFT 1
#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+/* GPM MSG_IDLE */
+#define MSG_IDLE_CS _MMIO(0x8000)
+#define MSG_IDLE_VCS0 _MMIO(0x8004)
+#define MSG_IDLE_VCS1 _MMIO(0x8008)
+#define MSG_IDLE_BCS _MMIO(0x800C)
+#define MSG_IDLE_VECS0 _MMIO(0x8010)
+#define MSG_IDLE_VCS2 _MMIO(0x80C0)
+#define MSG_IDLE_VCS3 _MMIO(0x80C4)
+#define MSG_IDLE_VCS4 _MMIO(0x80C8)
+#define MSG_IDLE_VCS5 _MMIO(0x80CC)
+#define MSG_IDLE_VCS6 _MMIO(0x80D0)
+#define MSG_IDLE_VCS7 _MMIO(0x80D4)
+#define MSG_IDLE_VECS1 _MMIO(0x80D8)
+#define MSG_IDLE_VECS2 _MMIO(0x80DC)
+#define MSG_IDLE_VECS3 _MMIO(0x80E0)
+#define MSG_IDLE_FW_MASK REG_GENMASK(13, 9)
+#define MSG_IDLE_FW_SHIFT 9
+
#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
@@ -988,6 +1007,7 @@
#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
+#define GEN12_COMPCTX_TLB_INV_CR _MMIO(0xcf04)
#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28)
#define RENDER_MOD_CTRL _MMIO(0xcf2c)
@@ -1087,6 +1107,7 @@
#define EU_PERF_CNTL3 _MMIO(0xe758)
#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8)
+#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4)
#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32)
@@ -1440,7 +1461,6 @@
#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
-#define VLV_GT_RENDER_RC6 _MMIO(0x138108)
#define VLV_GT_MEDIA_RC6 _MMIO(0x13810c)
#define GEN6_GT_GFX_RC6p _MMIO(0x13810c)
@@ -1483,6 +1503,7 @@
#define OTHER_GUC_INSTANCE 0
#define OTHER_GTPM_INSTANCE 1
#define OTHER_KCR_INSTANCE 4
+#define OTHER_GSC_INSTANCE 6
#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
new file mode 100644
index 000000000000..8ec8bc660c8c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/printk.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_sysfs.h"
+#include "intel_gt.h"
+#include "intel_gt_sysfs.h"
+#include "intel_gt_sysfs_pm.h"
+#include "intel_gt_types.h"
+#include "intel_rc6.h"
+
+bool is_object_gt(struct kobject *kobj)
+{
+ return !strncmp(kobj->name, "gt", 2);
+}
+
+static struct intel_gt *kobj_to_gt(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_gt, base)->gt;
+}
+
+struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
+ const char *name)
+{
+ struct kobject *kobj = &dev->kobj;
+
+	/*
+	 * We are interested in knowing whether the interface has been
+	 * called from the gt/ directory or from its parent directory,
+	 * because the type of the private data depends on the caller:
+	 * if the interface is called from gt/ the private data is a
+	 * "struct intel_gt *", otherwise it is a
+	 * "struct drm_i915_private *".
+	 */
+ if (!is_object_gt(kobj)) {
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ return to_gt(i915);
+ }
+
+ return kobj_to_gt(kobj);
+}
+
+static struct kobject *gt_get_parent_obj(struct intel_gt *gt)
+{
+ return &gt->i915->drm.primary->kdev->kobj;
+}
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+
+ return sysfs_emit(buf, "%u\n", gt->info.id);
+}
+static DEVICE_ATTR_RO(id);
+
+static struct attribute *id_attrs[] = {
+ &dev_attr_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(id);
+
+static void kobj_gt_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type kobj_gt_type = {
+ .release = kobj_gt_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = id_groups,
+};
+
+void intel_gt_sysfs_register(struct intel_gt *gt)
+{
+ struct kobj_gt *kg;
+
+	/*
+	 * To preserve the sysfs ABI, the legacy files that were
+	 * originally generated under the parent directory are still
+	 * created there, but only for gt 0 to avoid duplicates.
+	 */
+ if (gt_is_root(gt))
+ intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
+
+ kg = kzalloc(sizeof(*kg), GFP_KERNEL);
+ if (!kg)
+ goto exit_fail;
+
+ kobject_init(&kg->base, &kobj_gt_type);
+ kg->gt = gt;
+
+ /* xfer ownership to sysfs tree */
+ if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id))
+ goto exit_kobj_put;
+
+ intel_gt_sysfs_pm_init(gt, &kg->base);
+
+ return;
+
+exit_kobj_put:
+ kobject_put(&kg->base);
+
+exit_fail:
+ drm_warn(&gt->i915->drm,
+ "failed to initialize gt%d sysfs root\n", gt->info.id);
+}
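+
+/*
+ * Illustrative layout after registration (hypothetical two-tile card0; the
+ * exact paths depend on the DRM minor):
+ *
+ *   card0/gt_min_freq_mhz, card0/power/rc6_residency_ms - legacy ABI, gt 0 only
+ *   card0/gt/gt0/id, card0/gt/gt0/rps_min_freq_mhz, ... - per-tile interfaces
+ *   card0/gt/gt1/id, card0/gt/gt1/rps_min_freq_mhz, ...
+ */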
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
new file mode 100644
index 000000000000..9471b26752cf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SYSFS_GT_H__
+#define __SYSFS_GT_H__
+
+#include <linux/ctype.h>
+#include <linux/kobject.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON() */
+
+struct intel_gt;
+
+struct kobj_gt {
+ struct kobject base;
+ struct intel_gt *gt;
+};
+
+bool is_object_gt(struct kobject *kobj);
+
+struct drm_i915_private *kobj_to_i915(struct kobject *kobj);
+
+struct kobject *
+intel_gt_create_kobj(struct intel_gt *gt,
+ struct kobject *dir,
+ const char *name);
+
+void intel_gt_sysfs_register(struct intel_gt *gt);
+struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
+ const char *name);
+
+#endif /* __SYSFS_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
new file mode 100644
index 000000000000..f76b6cf8040e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+#include <linux/sysfs.h>
+#include <linux/printk.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_sysfs.h"
+#include "intel_gt.h"
+#include "intel_gt_regs.h"
+#include "intel_gt_sysfs.h"
+#include "intel_gt_sysfs_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+
+enum intel_gt_sysfs_op {
+ INTEL_GT_SYSFS_MIN = 0,
+ INTEL_GT_SYSFS_MAX,
+};
+
+static int
+sysfs_gt_attribute_w_func(struct device *dev, struct device_attribute *attr,
+ int (func)(struct intel_gt *gt, u32 val), u32 val)
+{
+ struct intel_gt *gt;
+ int ret;
+
+ if (!is_object_gt(&dev->kobj)) {
+ int i;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ for_each_gt(gt, i915, i) {
+ ret = func(gt, val);
+ if (ret)
+ break;
+ }
+ } else {
+ gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ ret = func(gt, val);
+ }
+
+ return ret;
+}
+
+static u32
+sysfs_gt_attribute_r_func(struct device *dev, struct device_attribute *attr,
+ u32 (func)(struct intel_gt *gt),
+ enum intel_gt_sysfs_op op)
+{
+ struct intel_gt *gt;
+ u32 ret;
+
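+	/* Seed with the identity element for the op: 0 for max, U32_MAX for min. */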
+ ret = (op == INTEL_GT_SYSFS_MAX) ? 0 : (u32) -1;
+
+ if (!is_object_gt(&dev->kobj)) {
+ int i;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ for_each_gt(gt, i915, i) {
+ u32 val = func(gt);
+
+ switch (op) {
+ case INTEL_GT_SYSFS_MIN:
+ if (val < ret)
+ ret = val;
+ break;
+
+ case INTEL_GT_SYSFS_MAX:
+ if (val > ret)
+ ret = val;
+ break;
+ }
+ }
+ } else {
+ gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ ret = func(gt);
+ }
+
+ return ret;
+}
+
+/* RC6 interfaces will show the minimum RC6 residency value */
+#define sysfs_gt_attribute_r_min_func(d, a, f) \
+ sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MIN)
+
+/* Frequency interfaces will show the maximum frequency value */
+#define sysfs_gt_attribute_r_max_func(d, a, f) \
+ sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MAX)
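+
+/*
+ * Worked example (illustrative): on a hypothetical two-tile device where
+ * gt0 reports 1000 ms and gt1 reports 800 ms of RC6 residency, the legacy
+ * parent-directory interface reports min(1000, 800) = 800, while each
+ * gt/gtN directory reports its own tile's value. The frequency interfaces
+ * aggregate with max() instead.
+ */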
+
+#ifdef CONFIG_PM
+static u32 get_residency(struct intel_gt *gt, i915_reg_t reg)
+{
+ intel_wakeref_t wakeref;
+ u64 res = 0;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ res = intel_rc6_residency_us(&gt->rc6, reg);
+
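+	/* intel_rc6_residency_us() reports microseconds; convert to milliseconds. */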
+ return DIV_ROUND_CLOSEST_ULL(res, 1000);
+}
+
+static ssize_t rc6_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ u8 mask = 0;
+
+ if (HAS_RC6(gt->i915))
+ mask |= BIT(0);
+ if (HAS_RC6p(gt->i915))
+ mask |= BIT(1);
+ if (HAS_RC6pp(gt->i915))
+ mask |= BIT(2);
+
+ return sysfs_emit(buff, "%x\n", mask);
+}
+
+static u32 __rc6_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6);
+}
+
+static ssize_t rc6_residency_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+ __rc6_residency_ms_show);
+
+ return sysfs_emit(buff, "%u\n", rc6_residency);
+}
+
+static u32 __rc6p_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6p);
+}
+
+static ssize_t rc6p_residency_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 rc6p_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+ __rc6p_residency_ms_show);
+
+ return sysfs_emit(buff, "%u\n", rc6p_residency);
+}
+
+static u32 __rc6pp_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6pp);
+}
+
+static ssize_t rc6pp_residency_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 rc6pp_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+ __rc6pp_residency_ms_show);
+
+ return sysfs_emit(buff, "%u\n", rc6pp_residency);
+}
+
+static u32 __media_rc6_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, VLV_GT_MEDIA_RC6);
+}
+
+static ssize_t media_rc6_residency_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+ __media_rc6_residency_ms_show);
+
+ return sysfs_emit(buff, "%u\n", rc6_residency);
+}
+
+static DEVICE_ATTR_RO(rc6_enable);
+static DEVICE_ATTR_RO(rc6_residency_ms);
+static DEVICE_ATTR_RO(rc6p_residency_ms);
+static DEVICE_ATTR_RO(rc6pp_residency_ms);
+static DEVICE_ATTR_RO(media_rc6_residency_ms);
+
+static struct attribute *rc6_attrs[] = {
+ &dev_attr_rc6_enable.attr,
+ &dev_attr_rc6_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *rc6p_attrs[] = {
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *media_rc6_attrs[] = {
+ &dev_attr_media_rc6_residency_ms.attr,
+ NULL
+};
+
+static const struct attribute_group rc6_attr_group[] = {
+ { .attrs = rc6_attrs, },
+ { .name = power_group_name, .attrs = rc6_attrs, },
+};
+
+static const struct attribute_group rc6p_attr_group[] = {
+ { .attrs = rc6p_attrs, },
+ { .name = power_group_name, .attrs = rc6p_attrs, },
+};
+
+static const struct attribute_group media_rc6_attr_group[] = {
+ { .attrs = media_rc6_attrs, },
+ { .name = power_group_name, .attrs = media_rc6_attrs, },
+};
+
+static int __intel_gt_sysfs_create_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return is_object_gt(kobj) ?
+ sysfs_create_group(kobj, &grp[0]) :
+ sysfs_merge_group(kobj, &grp[1]);
+}
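+
+/*
+ * For example (sketch): with the RC6 groups below, a gt/gtN kobject gets a
+ * plain sysfs group (grp[0]), while on the legacy parent directory grp[1]
+ * is merged into the existing "power" group, preserving the old file paths.
+ */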
+
+static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
+{
+ int ret;
+
+ if (!HAS_RC6(gt->i915))
+ return;
+
+ ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RC6 sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+
+	/*
+	 * We cannot use the is_visible() attribute here because the
+	 * upper-level object inherits visibility from the parent group.
+	 */
+ if (HAS_RC6p(gt->i915)) {
+ ret = __intel_gt_sysfs_create_group(kobj, rc6p_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RC6p sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
+
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
+ ret = __intel_gt_sysfs_create_group(kobj, media_rc6_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create media %u RC6 sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
+}
+#else
+static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
+{
+}
+#endif /* CONFIG_PM */
+
+static u32 __act_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_read_actual_frequency(&gt->rps);
+}
+
+static ssize_t act_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 actual_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __act_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", actual_freq);
+}
+
+static u32 __cur_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_requested_frequency(&gt->rps);
+}
+
+static ssize_t cur_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 cur_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __cur_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", cur_freq);
+}
+
+static u32 __boost_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_boost_frequency(&gt->rps);
+}
+
+static ssize_t boost_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 boost_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __boost_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", boost_freq);
+}
+
+static int __boost_freq_mhz_store(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_boost_frequency(&gt->rps, val);
+}
+
+static ssize_t boost_freq_mhz_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ ssize_t ret;
+ u32 val;
+
+ ret = kstrtou32(buff, 0, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_gt_attribute_w_func(dev, attr,
+ __boost_freq_mhz_store, val) ?: count;
+}
+
+static u32 __rp0_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rp0_frequency(&gt->rps);
+}
+
+static ssize_t RP0_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 rp0_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __rp0_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", rp0_freq);
+}
+
+static u32 __rp1_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rp1_frequency(&gt->rps);
+}
+
+static ssize_t RP1_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 rp1_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __rp1_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", rp1_freq);
+}
+
+static u32 __rpn_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rpn_frequency(&gt->rps);
+}
+
+static ssize_t RPn_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 rpn_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __rpn_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", rpn_freq);
+}
+
+static u32 __max_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_max_frequency(&gt->rps);
+}
+
+static ssize_t max_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 max_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __max_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", max_freq);
+}
+
+static int __set_max_freq(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_max_frequency(&gt->rps, val);
+}
+
+static ssize_t max_freq_mhz_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 val;
+
+ ret = kstrtou32(buff, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = sysfs_gt_attribute_w_func(dev, attr, __set_max_freq, val);
+
+ return ret ?: count;
+}
+
+static u32 __min_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_min_frequency(&gt->rps);
+}
+
+static ssize_t min_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 min_freq = sysfs_gt_attribute_r_min_func(dev, attr,
+ __min_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", min_freq);
+}
+
+static int __set_min_freq(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_min_frequency(&gt->rps, val);
+}
+
+static ssize_t min_freq_mhz_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 val;
+
+ ret = kstrtou32(buff, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = sysfs_gt_attribute_w_func(dev, attr, __set_min_freq, val);
+
+ return ret ?: count;
+}
+
+static u32 __vlv_rpe_freq_mhz_show(struct intel_gt *gt)
+{
+ struct intel_rps *rps = &gt->rps;
+
+ return intel_gpu_freq(rps, rps->efficient_freq);
+}
+
+static ssize_t vlv_rpe_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 rpe_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __vlv_rpe_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", rpe_freq);
+}
+
+#define INTEL_GT_RPS_SYSFS_ATTR(_name, _mode, _show, _store) \
+ static struct device_attribute dev_attr_gt_##_name = __ATTR(gt_##_name, _mode, _show, _store); \
+ static struct device_attribute dev_attr_rps_##_name = __ATTR(rps_##_name, _mode, _show, _store)
+
+#define INTEL_GT_RPS_SYSFS_ATTR_RO(_name) \
+ INTEL_GT_RPS_SYSFS_ATTR(_name, 0444, _name##_show, NULL)
+#define INTEL_GT_RPS_SYSFS_ATTR_RW(_name) \
+ INTEL_GT_RPS_SYSFS_ATTR(_name, 0644, _name##_show, _name##_store)
+
+/* The below macros generate static structures */
+INTEL_GT_RPS_SYSFS_ATTR_RO(act_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(cur_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RW(boost_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(RP0_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(RP1_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(RPn_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RW(max_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RW(min_freq_mhz);
+
+static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
+
+#define GEN6_ATTR(s) { \
+ &dev_attr_##s##_act_freq_mhz.attr, \
+ &dev_attr_##s##_cur_freq_mhz.attr, \
+ &dev_attr_##s##_boost_freq_mhz.attr, \
+ &dev_attr_##s##_max_freq_mhz.attr, \
+ &dev_attr_##s##_min_freq_mhz.attr, \
+ &dev_attr_##s##_RP0_freq_mhz.attr, \
+ &dev_attr_##s##_RP1_freq_mhz.attr, \
+ &dev_attr_##s##_RPn_freq_mhz.attr, \
+ NULL, \
+ }
+
+#define GEN6_RPS_ATTR GEN6_ATTR(rps)
+#define GEN6_GT_ATTR GEN6_ATTR(gt)
+
+static const struct attribute * const gen6_rps_attrs[] = GEN6_RPS_ATTR;
+static const struct attribute * const gen6_gt_attrs[] = GEN6_GT_ATTR;
+
+static ssize_t punit_req_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ u32 preq = intel_rps_read_punit_req_frequency(&gt->rps);
+
+ return sysfs_emit(buff, "%u\n", preq);
+}
+
+struct intel_gt_bool_throttle_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ i915_reg_t reg32;
+ u32 mask;
+};
+
+static ssize_t throttle_reason_bool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ struct intel_gt_bool_throttle_attr *t_attr =
+ (struct intel_gt_bool_throttle_attr *) attr;
+ bool val = rps_read_mask_mmio(&gt->rps, t_attr->reg32, t_attr->mask);
+
+ return sysfs_emit(buff, "%u\n", val);
+}
+
+#define INTEL_GT_RPS_BOOL_ATTR_RO(sysfs_func__, mask__) \
+struct intel_gt_bool_throttle_attr attr_##sysfs_func__ = { \
+ .attr = { .name = __stringify(sysfs_func__), .mode = 0444 }, \
+ .show = throttle_reason_bool_show, \
+ .reg32 = GT0_PERF_LIMIT_REASONS, \
+ .mask = mask__, \
+}
+
+static DEVICE_ATTR_RO(punit_req_freq_mhz);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_status, GT0_PERF_LIMIT_REASONS_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl1, POWER_LIMIT_1_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl2, POWER_LIMIT_2_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl4, POWER_LIMIT_4_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_thermal, THERMAL_LIMIT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_prochot, PROCHOT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
+
+static const struct attribute *freq_attrs[] = {
+ &dev_attr_punit_req_freq_mhz.attr,
+ &attr_throttle_reason_status.attr,
+ &attr_throttle_reason_pl1.attr,
+ &attr_throttle_reason_pl2.attr,
+ &attr_throttle_reason_pl4.attr,
+ &attr_throttle_reason_thermal.attr,
+ &attr_throttle_reason_prochot.attr,
+ &attr_throttle_reason_ratl.attr,
+ &attr_throttle_reason_vr_thermalert.attr,
+ &attr_throttle_reason_vr_tdc.attr,
+ NULL
+};
+
+static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
+ const struct attribute * const *attrs)
+{
+ int ret;
+
+ if (GRAPHICS_VER(gt->i915) < 6)
+ return 0;
+
+ ret = sysfs_create_files(kobj, attrs);
+ if (ret)
+ return ret;
+
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+ ret = sysfs_create_file(kobj, &dev_attr_vlv_rpe_freq_mhz.attr);
+
+ return ret;
+}
+
+void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
+{
+ int ret;
+
+ intel_sysfs_rc6_init(gt, kobj);
+
+ ret = is_object_gt(kobj) ?
+ intel_sysfs_rps_init(gt, kobj, gen6_rps_attrs) :
+ intel_sysfs_rps_init(gt, kobj, gen6_gt_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RPS sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+
+ /* end of the legacy interfaces */
+ if (!is_object_gt(kobj))
+ return;
+
+ ret = sysfs_create_files(kobj, freq_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u throttle sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h
new file mode 100644
index 000000000000..f567105a4a89
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SYSFS_GT_PM_H__
+#define __SYSFS_GT_PM_H__
+
+#include <linux/kobject.h>
+
+#include "intel_gt_types.h"
+
+void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj);
+
+#endif /* __SYSFS_GT_PM_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index f20687796490..b06611c1d4ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -16,10 +16,12 @@
#include <linux/workqueue.h>
#include "uc/intel_uc.h"
+#include "intel_gsc.h"
#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
+#include "intel_hwconfig.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
@@ -72,6 +74,7 @@ struct intel_gt {
struct i915_ggtt *ggtt;
struct intel_uc uc;
+ struct intel_gsc gsc;
struct mutex tlb_invalidate_lock;
@@ -182,7 +185,19 @@ struct intel_gt {
const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];
+ struct {
+ u8 groupid;
+ u8 instanceid;
+ } default_steering;
+
+	/*
+	 * Base of per-tile GTTMMADR, from which the MMIO and the GGTT
+	 * regions are derived.
+	 */
+ phys_addr_t phys_addr;
+
struct intel_gt_info {
+ unsigned int id;
+
intel_engine_mask_t engine_mask;
u32 l3bank_mask;
@@ -199,6 +214,9 @@ struct intel_gt {
struct sseu_dev_info sseu;
unsigned long mslice_mask;
+
+ /** @hwconfig: hardware configuration data */
+ struct intel_hwconfig hwconfig;
} info;
struct {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index a5f5b2dda332..b67831833c9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -13,10 +13,22 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
+#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"
+
+static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
+{
+ return IS_BROXTON(i915) && i915_vtd_active(i915);
+}
+
+bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
+}
+
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
struct drm_i915_gem_object *obj;
@@ -97,32 +109,52 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
return 0;
}
-void __i915_vm_close(struct i915_address_space *vm)
+static void clear_vm_list(struct list_head *list)
{
struct i915_vma *vma, *vn;
- if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
- return;
-
- list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ list_for_each_entry_safe(vma, vn, list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
- if (!kref_get_unless_zero(&obj->base.refcount)) {
+ if (!i915_gem_object_get_rcu(obj)) {
/*
- * Unbind the dying vma to ensure the bound_list
+ * Object is dying, but has not yet cleared its
+ * vma list.
+ * Unbind the dying vma to ensure our list
* is completely drained. We leave the destruction to
- * the object destructor.
+			 * the object destructor to avoid the vma
+			 * disappearing from under it.
*/
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma));
- continue;
+
+ /* Remove from the unbound list */
+ list_del_init(&vma->vm_link);
+
+ /*
+ * Delay the vm and vm mutex freeing until the
+ * object is done with destruction.
+ */
+ i915_vm_resv_get(vma->vm);
+ vma->vm_ddestroy = true;
+ } else {
+ i915_vma_destroy_locked(vma);
+ i915_gem_object_put(obj);
}
- /* Keep the obj (and hence the vma) alive as _we_ destroy it */
- i915_vma_destroy_locked(vma);
- i915_gem_object_put(obj);
}
+}
+
+static void __i915_vm_close(struct i915_address_space *vm)
+{
+ mutex_lock(&vm->mutex);
+
+ clear_vm_list(&vm->bound_list);
+ clear_vm_list(&vm->unbound_list);
+
+ /* Check for must-fix unanticipated side-effects */
GEM_BUG_ON(!list_empty(&vm->bound_list));
+ GEM_BUG_ON(!list_empty(&vm->unbound_list));
mutex_unlock(&vm->mutex);
}
@@ -144,7 +176,6 @@ int i915_vm_lock_objects(struct i915_address_space *vm,
void i915_address_space_fini(struct i915_address_space *vm)
{
drm_mm_takedown(&vm->mm);
- mutex_destroy(&vm->mutex);
}
/**
@@ -152,7 +183,8 @@ void i915_address_space_fini(struct i915_address_space *vm)
* @kref: Pointer to the &i915_address_space.resv_ref member.
*
* This function is called when the last lock sharer no longer shares the
- * &i915_address_space._resv lock.
+ * &i915_address_space._resv lock, and also when vma destruction has raced
+ * with vm teardown and taken an extra reference to delay the freeing.
*/
void i915_vm_resv_release(struct kref *kref)
{
@@ -160,6 +192,8 @@ void i915_vm_resv_release(struct kref *kref)
container_of(kref, typeof(*vm), resv_ref);
dma_resv_fini(&vm->_resv);
+ mutex_destroy(&vm->mutex);
+
kfree(vm);
}
@@ -168,6 +202,8 @@ static void __i915_vm_release(struct work_struct *work)
struct i915_address_space *vm =
container_of(work, struct i915_address_space, release_work);
+ __i915_vm_close(vm);
+
/* Synchronize async unbinds. */
i915_vma_resource_bind_dep_sync_all(vm);
@@ -201,7 +237,6 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
vm->pending_unbind = RB_ROOT_CACHED;
INIT_WORK(&vm->release_work, __i915_vm_release);
- atomic_set(&vm->open, 1);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -246,6 +281,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
INIT_LIST_HEAD(&vm->bound_list);
+ INIT_LIST_HEAD(&vm->unbound_list);
}
void *__px_vaddr(struct drm_i915_gem_object *p)
@@ -274,7 +310,7 @@ fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
void *vaddr = __px_vaddr(p);
memset64(vaddr, val, count);
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 9d83c2d3959c..a40d928b3888 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -240,15 +240,6 @@ struct i915_address_space {
unsigned int bind_async_flags;
- /*
- * Each active user context has its own address space (in full-ppgtt).
- * Since the vm may be shared between multiple contexts, we count how
- * many contexts keep us "open". Once open hits zero, we are closed
- * and do not allow any new attachments, and proceed to shutdown our
- * vma and page directories.
- */
- atomic_t open;
-
struct mutex mutex; /* protects vma and our lists */
struct kref resv_ref; /* kref to keep the reservation lock alive. */
@@ -263,6 +254,11 @@ struct i915_address_space {
*/
struct list_head bound_list;
+ /**
+ * List of vmas not yet bound or evicted.
+ */
+ struct list_head unbound_list;
+
/* Global GTT */
bool is_ggtt:1;
@@ -272,6 +268,9 @@ struct i915_address_space {
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
+ /* Skip pte rewrite on unbind for suspend. Protected by @mutex */
+ bool skip_pte_rewrite:1;
+
u8 top;
u8 pd_shift;
u8 scratch_order;
@@ -383,6 +382,8 @@ struct i915_ppgtt {
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))
+bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);
+
int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
@@ -446,6 +447,17 @@ i915_vm_get(struct i915_address_space *vm)
return vm;
}
+static inline struct i915_address_space *
+i915_vm_tryget(struct i915_address_space *vm)
+{
+ return kref_get_unless_zero(&vm->ref) ? vm : NULL;
+}
+
+static inline void assert_vm_alive(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!kref_read(&vm->ref));
+}
+
/**
* i915_vm_resv_get - Obtain a reference on the vm's reservation lock
* @vm: The vm whose reservation lock we want to share.
@@ -476,34 +488,6 @@ static inline void i915_vm_resv_put(struct i915_address_space *vm)
kref_put(&vm->resv_ref, i915_vm_resv_release);
}
-static inline struct i915_address_space *
-i915_vm_open(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!atomic_read(&vm->open));
- atomic_inc(&vm->open);
- return i915_vm_get(vm);
-}
-
-static inline bool
-i915_vm_tryopen(struct i915_address_space *vm)
-{
- if (atomic_add_unless(&vm->open, 1, 0))
- return i915_vm_get(vm);
-
- return false;
-}
-
-void __i915_vm_close(struct i915_address_space *vm);
-
-static inline void
-i915_vm_close(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!atomic_read(&vm->open));
- __i915_vm_close(vm);
-
- i915_vm_put(vm);
-}
-
void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);
@@ -565,6 +549,14 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
+void intel_ggtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
+void intel_ggtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res);
+
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
@@ -635,6 +627,7 @@ release_pd_entry(struct i915_page_directory * const pd,
struct i915_page_table * const pt,
const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
+void gen8_ggtt_invalidate(struct i915_ggtt *ggtt);
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
diff --git a/drivers/gpu/drm/i915/gt/intel_hwconfig.h b/drivers/gpu/drm/i915/gt/intel_hwconfig.h
new file mode 100644
index 000000000000..322290780b67
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_hwconfig.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _INTEL_HWCONFIG_H_
+#define _INTEL_HWCONFIG_H_
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+struct intel_hwconfig {
+ u32 size;
+ void *ptr;
+};
+
+int intel_gt_init_hwconfig(struct intel_gt *gt);
+void intel_gt_fini_hwconfig(struct intel_gt *gt);
+
+#endif /* _INTEL_HWCONFIG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 07bef7128fdb..eec73c66406c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -778,7 +778,7 @@ static void init_common_regs(u32 * const regs,
CTX_CTRL_RS_CTX_ENABLE);
regs[CTX_CONTEXT_CONTROL] = ctl;
- regs[CTX_TIMESTAMP] = ce->runtime.last;
+ regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
}
static void init_wa_bb_regs(u32 * const regs,
@@ -904,6 +904,24 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
engine->name);
}
+static u32 context_wa_bb_offset(const struct intel_context *ce)
+{
+ return PAGE_SIZE * ce->wa_bb_page;
+}
+
+static u32 *context_indirect_bb(const struct intel_context *ce)
+{
+ void *ptr;
+
+ GEM_BUG_ON(!ce->wa_bb_page);
+
+ ptr = ce->lrc_reg_state;
+ ptr -= LRC_STATE_OFFSET; /* back to start of context image */
+ ptr += context_wa_bb_offset(ce);
+
+ return ptr;
+}
+
void lrc_init_state(struct intel_context *ce,
struct intel_engine_cs *engine,
void *state)
@@ -922,6 +940,10 @@ void lrc_init_state(struct intel_context *ce,
/* Clear the ppHWSP (inc. per-context counters) */
memset(state, 0, PAGE_SIZE);
+ /* Clear the indirect wa and storage */
+ if (ce->wa_bb_page)
+ memset(state + context_wa_bb_offset(ce), 0, PAGE_SIZE);
+
/*
* The second page of the context object contains some registers which
* must be set up prior to the first execution.
@@ -929,6 +951,35 @@ void lrc_init_state(struct intel_context *ce,
__lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
}
+u32 lrc_indirect_bb(const struct intel_context *ce)
+{
+ return i915_ggtt_offset(ce->state) + context_wa_bb_offset(ce);
+}
+
+static u32 *setup_predicate_disable_wa(const struct intel_context *ce, u32 *cs)
+{
+ /* If predication is active, this will be noop'ed */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
+ *cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
+ *cs++ = 0;
+ *cs++ = 0; /* No predication */
+
+ /* predicated end, only terminates if SET_PREDICATE_RESULT:0 is clear */
+ *cs++ = MI_BATCH_BUFFER_END | BIT(15);
+ *cs++ = MI_SET_PREDICATE | MI_SET_PREDICATE_DISABLE;
+
+ /* Instructions are no longer predicated (disabled), we can proceed */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
+ *cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
+ *cs++ = 0;
+ *cs++ = 1; /* enable predication before the next BB */
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ GEM_BUG_ON(offset_in_page(cs) > DG2_PREDICATE_RESULT_WA);
+
+ return cs;
+}
+
static struct i915_vma *
__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
{
@@ -1208,6 +1259,10 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
IS_DG2_G11(ce->engine->i915))
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
+ /* hsdes: 1809175790 */
+ if (!HAS_FLAT_CCS(ce->engine->i915))
+ cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+
return cs;
}
@@ -1225,25 +1280,15 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
0);
- return cs;
-}
-
-static u32 context_wa_bb_offset(const struct intel_context *ce)
-{
- return PAGE_SIZE * ce->wa_bb_page;
-}
-
-static u32 *context_indirect_bb(const struct intel_context *ce)
-{
- void *ptr;
-
- GEM_BUG_ON(!ce->wa_bb_page);
-
- ptr = ce->lrc_reg_state;
- ptr -= LRC_STATE_OFFSET; /* back to start of context image */
- ptr += context_wa_bb_offset(ce);
+ /* hsdes: 1809175790 */
+ if (!HAS_FLAT_CCS(ce->engine->i915)) {
+ if (ce->engine->class == VIDEO_DECODE_CLASS)
+ cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
+ cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+ }
- return ptr;
+ return cs;
}
static void
@@ -1259,9 +1304,11 @@ setup_indirect_ctx_bb(const struct intel_context *ce,
while ((unsigned long)cs % CACHELINE_BYTES)
*cs++ = MI_NOOP;
+ GEM_BUG_ON(cs - start > DG2_PREDICATE_RESULT_BB / sizeof(*start));
+ setup_predicate_disable_wa(ce, start + DG2_PREDICATE_RESULT_BB / sizeof(*start));
+
lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
- i915_ggtt_offset(ce->state) +
- context_wa_bb_offset(ce),
+ lrc_indirect_bb(ce),
(cs - start) * sizeof(*cs));
}
@@ -1722,11 +1769,12 @@ err:
}
}
-static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
+static void st_runtime_underflow(struct intel_context_stats *stats, s32 dt)
{
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
- ce->runtime.num_underflow++;
- ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+ stats->runtime.num_underflow++;
+ stats->runtime.max_underflow =
+ max_t(u32, stats->runtime.max_underflow, -dt);
#endif
}
@@ -1743,25 +1791,25 @@ static u32 lrc_get_runtime(const struct intel_context *ce)
void lrc_update_runtime(struct intel_context *ce)
{
+ struct intel_context_stats *stats = &ce->stats;
u32 old;
s32 dt;
- if (intel_context_is_barrier(ce))
+ old = stats->runtime.last;
+ stats->runtime.last = lrc_get_runtime(ce);
+ dt = stats->runtime.last - old;
+ if (!dt)
return;
- old = ce->runtime.last;
- ce->runtime.last = lrc_get_runtime(ce);
- dt = ce->runtime.last - old;
-
if (unlikely(dt < 0)) {
CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
- old, ce->runtime.last, dt);
- st_update_runtime_underflow(ce, dt);
+ old, stats->runtime.last, dt);
+ st_runtime_underflow(stats, dt);
return;
}
- ewma_runtime_add(&ce->runtime.avg, dt);
- ce->runtime.total += dt;
+ ewma_runtime_add(&stats->runtime.avg, dt);
+ stats->runtime.total += dt;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 6e4f9f58fca5..31be734010db 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -11,9 +11,10 @@
#include <linux/bitfield.h>
#include <linux/types.h>
+#include "intel_context.h"
+
struct drm_i915_gem_object;
struct i915_gem_ww_ctx;
-struct intel_context;
struct intel_engine_cs;
struct intel_ring;
struct kref;
@@ -120,4 +121,33 @@ static inline u32 lrc_desc_priority(int prio)
return GEN12_CTX_PRIORITY_NORMAL;
}
+static inline void lrc_runtime_start(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+
+ if (intel_context_is_barrier(ce))
+ return;
+
+ if (stats->active)
+ return;
+
+ WRITE_ONCE(stats->active, intel_context_clock());
+}
+
+static inline void lrc_runtime_stop(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+
+ if (!stats->active)
+ return;
+
+ lrc_update_runtime(ce);
+ WRITE_ONCE(stats->active, 0);
+}
+
+#define DG2_PREDICATE_RESULT_WA (PAGE_SIZE - sizeof(u64))
+#define DG2_PREDICATE_RESULT_BB (2048)
+
+u32 lrc_indirect_bb(const struct intel_context *ce);
+
#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 20444d6ceb3c..2c35324b5f68 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -17,6 +17,8 @@ struct insert_pte_data {
#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
+#define GET_CCS_BYTES(i915, size) (HAS_FLAT_CCS(i915) ? \
+ DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)
static bool engine_supports_migration(struct intel_engine_cs *engine)
{
if (!engine)
@@ -467,6 +469,128 @@ static bool wa_1209644611_applies(int ver, u32 size)
return height % 4 == 3 && height <= 8;
}
+/**
+ * DOC: Flat-CCS - Memory compression for Local memory
+ *
+ * On Xe-HP and later devices, we use dedicated compression control state (CCS)
+ * stored in local memory for each surface, to support the 3D and media
+ * compression formats.
+ *
+ * The memory required for the CCS of the entire local memory is 1/256 of
+ * the local memory size. Before the kernel boots, this memory is reserved
+ * for the CCS data and a secure register is programmed with the CCS base
+ * address.
+ *
+ * Flat-CCS data needs to be cleared when an lmem object is allocated.
+ * CCS data can be copied in and out of the CCS region through
+ * XY_CTRL_SURF_COPY_BLT; the CPU cannot access the CCS data directly.
+ *
+ * i915 supports Flat-CCS only on lmem-only objects. When an object has smem
+ * in its placement list, then under memory pressure i915 must migrate the
+ * lmem content into smem. If that lmem object were Flat-CCS compressed by
+ * userspace, i915 would need to decompress it, but it lacks the information
+ * required for such decompression. Hence Flat-CCS is supported only for
+ * lmem-only objects.
+ *
+ * When we exhaust the lmem, Flat-CCS capable objects' lmem backing memory can
+ * be temporarily evicted to smem, along with the auxiliary CCS state, where
+ * it can be potentially swapped-out at a later point, if required.
+ * If userspace later touches the evicted pages, then we always move
+ * the backing memory back to lmem, which includes restoring the saved CCS state,
+ * and potentially performing any required swap-in.
+ *
+ * When migrating lmem objects that have smem in their placement list, such
+ * as {lmem, smem}, the objects are treated as non-Flat-CCS-capable.
+ */
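+
+/*
+ * Worked sizing example (illustrative): with 16 GiB of local memory, the
+ * reserved CCS region is 16 GiB / 256 = 64 MiB, i.e. one CCS byte covers
+ * 256 bytes of lmem. GET_CCS_BYTES() applies the same 1/256 ratio per
+ * object: an 8 MiB chunk needs 8 MiB / 256 = 32 KiB of CCS data.
+ */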
+
+static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
+{
+ *cmd++ = MI_FLUSH_DW | flags;
+ *cmd++ = 0;
+ *cmd++ = 0;
+
+ return cmd;
+}
+
+static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size)
+{
+ u32 num_cmds, num_blks, total_size;
+
+ if (!GET_CCS_BYTES(i915, size))
+ return 0;
+
+	/*
+	 * XY_CTRL_SURF_COPY_BLT transfers CCS in 256-byte
+	 * blocks. One XY_CTRL_SURF_COPY_BLT command can
+	 * transfer up to 1024 blocks.
+	 */
+ num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
+ NUM_CCS_BYTES_PER_BLOCK);
+ num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER);
+ total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds;
+
+	/*
+	 * Account for a flush before and after the XY_CTRL_SURF_COPY_BLT
+	 * commands.
+	 */
+ total_size += 2 * MI_FLUSH_DW_SIZE;
+
+ return total_size;
+}
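+
+/*
+ * Worked example (illustrative, assuming NUM_CCS_BYTES_PER_BLOCK == 256 and
+ * NUM_CCS_BLKS_PER_XFER == 1024): an 8 MiB chunk carries 32 KiB of CCS,
+ * i.e. 32768 / 256 = 128 blocks, which fits in a single
+ * XY_CTRL_SURF_COPY_BLT, so the total is one copy command plus the two
+ * surrounding MI_FLUSH_DWs.
+ */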
+
+static int emit_copy_ccs(struct i915_request *rq,
+ u32 dst_offset, u8 dst_access,
+ u32 src_offset, u8 src_access, int size)
+{
+ struct drm_i915_private *i915 = rq->engine->i915;
+ int mocs = rq->engine->gt->mocs.uc_index << 1;
+ u32 num_ccs_blks, ccs_ring_size;
+ u32 *cs;
+
+ ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
+ WARN_ON(!ccs_ring_size);
+
+ cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
+ NUM_CCS_BYTES_PER_BLOCK);
+ GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
+ cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
+
+ /*
+ * The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS
+ * data in and out of the CCS region.
+ *
+ * We can copy at most 1024 blocks of 256 bytes using one
+ * XY_CTRL_SURF_COPY_BLT instruction.
+ *
+ * In case we need to copy more than 1024 blocks, we need to add
+ * another instruction to the same batch buffer.
+ *
+ * 1024 blocks of 256 bytes of CCS represent a total 256KB of CCS.
+ *
+ * 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
+ */
+ *cs++ = XY_CTRL_SURF_COPY_BLT |
+ src_access << SRC_ACCESS_TYPE_SHIFT |
+ dst_access << DST_ACCESS_TYPE_SHIFT |
+ ((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
+ *cs++ = src_offset;
+ *cs++ = rq->engine->instance |
+ FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
+ *cs++ = dst_offset;
+ *cs++ = rq->engine->instance |
+ FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
+
+ cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
+ if (ccs_ring_size & 1)
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
static int emit_copy(struct i915_request *rq,
u32 dst_offset, u32 src_offset, int size)
{
@@ -514,6 +638,57 @@ static int emit_copy(struct i915_request *rq,
return 0;
}
+static int scatter_list_length(struct scatterlist *sg)
+{
+ int len = 0;
+
+ while (sg && sg_dma_len(sg)) {
+ len += sg_dma_len(sg);
+ sg = sg_next(sg);
+	}
+
+ return len;
+}
+
+static void
+calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
+ int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
+{
+ if (ccs_bytes_to_cpy) {
+ if (!src_is_lmem)
+			/*
+			 * When CHUNK_SZ is passed, all pages up to CHUNK_SZ
+			 * are taken for the blt. On Flat-CCS capable
+			 * platforms the smem object has more pages than
+			 * required for main memory, so limit the copy to
+			 * the size required for main memory.
+			 */
+ *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
+ } else { /* ccs handling is not required */
+ *src_sz = CHUNK_SZ;
+ }
+}
+
+static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+{
+ u32 len;
+
+ do {
+ GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
+ len = it->max - it->dma;
+ if (len > bytes_to_cpy) {
+ it->dma += bytes_to_cpy;
+ break;
+ }
+
+ bytes_to_cpy -= len;
+
+ it->sg = __sg_next(it->sg);
+ it->dma = sg_dma_address(it->sg);
+ it->max = it->dma + sg_dma_len(it->sg);
+ } while (bytes_to_cpy);
+}
+
int
intel_context_migrate_copy(struct intel_context *ce,
const struct i915_deps *deps,
@@ -525,17 +700,67 @@ intel_context_migrate_copy(struct intel_context *ce,
bool dst_is_lmem,
struct i915_request **out)
{
- struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst);
+ struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ enum i915_cache_level ccs_cache_level;
+ u32 src_offset, dst_offset;
+ u8 src_access, dst_access;
struct i915_request *rq;
+ int src_sz, dst_sz;
+ bool ccs_is_src;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+ GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
*out = NULL;
GEM_BUG_ON(ce->ring->size < SZ_64K);
+ src_sz = scatter_list_length(src);
+ bytes_to_cpy = src_sz;
+
+ if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) {
+ src_access = !src_is_lmem && dst_is_lmem;
+ dst_access = !src_access;
+
+ dst_sz = scatter_list_length(dst);
+ if (src_is_lmem) {
+ it_ccs = it_dst;
+ ccs_cache_level = dst_cache_level;
+ ccs_is_src = false;
+ } else if (dst_is_lmem) {
+ bytes_to_cpy = dst_sz;
+ it_ccs = it_src;
+ ccs_cache_level = src_cache_level;
+ ccs_is_src = true;
+ }
+
+		/*
+		 * When an eviction of the CCS data is needed, smem holds the
+		 * extra pages for the CCS data.
+		 *
+		 * TO-DO: Move the size mismatch check to a WARN_ON, but we
+		 * still see some smem->lmem requests with matching sizes;
+		 * that needs fixing first.
+		 */
+ ccs_bytes_to_cpy = src_sz != dst_sz ? GET_CCS_BYTES(i915, bytes_to_cpy) : 0;
+ if (ccs_bytes_to_cpy)
+ get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
+ }
+
+ src_offset = 0;
+ dst_offset = CHUNK_SZ;
+ if (HAS_64K_PAGES(ce->engine->i915)) {
+ src_offset = 0;
+ dst_offset = 0;
+ if (src_is_lmem)
+ src_offset = CHUNK_SZ;
+ if (dst_is_lmem)
+ dst_offset = 2 * CHUNK_SZ;
+ }
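For orientation, the offsets chosen above imply a window layout inside the migration context's address space (an inference from this hunk, not wording from the patch): on 64K-page / Flat-CCS platforms each mapping gets its own CHUNK_SZ window so smem and lmem PTEs never share one:

	[0,          CHUNK_SZ)    window 0 - smem mapping (src and/or dst)
	[CHUNK_SZ,   2*CHUNK_SZ)  window 1 - lmem src mapping
	[2*CHUNK_SZ, 3*CHUNK_SZ)  window 2 - lmem dst mapping

On other platforms the source simply occupies window 0 and the destination window 1.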
+
do {
- u32 src_offset, dst_offset;
int len;
rq = i915_request_create(ce);
@@ -563,22 +788,16 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- src_offset = 0;
- dst_offset = CHUNK_SZ;
- if (HAS_64K_PAGES(ce->engine->i915)) {
- GEM_BUG_ON(!src_is_lmem && !dst_is_lmem);
-
- src_offset = 0;
- dst_offset = 0;
- if (src_is_lmem)
- src_offset = CHUNK_SZ;
- if (dst_is_lmem)
- dst_offset = 2 * CHUNK_SZ;
- }
+ calculate_chunk_sz(i915, src_is_lmem, &src_sz,
+ bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
- src_offset, CHUNK_SZ);
- if (len <= 0) {
+ src_offset, src_sz);
+ if (!len) {
+ err = -EINVAL;
+ goto out_rq;
+ }
+ if (len < 0) {
err = len;
goto out_rq;
}
@@ -596,7 +815,44 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- err = emit_copy(rq, dst_offset, src_offset, len);
+ err = emit_copy(rq, dst_offset, src_offset, len);
+ if (err)
+ goto out_rq;
+
+ bytes_to_cpy -= len;
+
+ if (ccs_bytes_to_cpy) {
+ int ccs_sz;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ ccs_sz = GET_CCS_BYTES(i915, len);
+ err = emit_pte(rq, &it_ccs, ccs_cache_level, false,
+ ccs_is_src ? src_offset : dst_offset,
+ ccs_sz);
+ if (err < 0)
+ goto out_rq;
+ if (err < ccs_sz) {
+ err = -EINVAL;
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ err = emit_copy_ccs(rq, dst_offset, dst_access,
+ src_offset, src_access, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+ ccs_bytes_to_cpy -= ccs_sz;
+ }
/* Arbitration is re-enabled between requests. */
out_rq:
@@ -604,9 +860,26 @@ out_rq:
i915_request_put(*out);
*out = i915_request_get(rq);
i915_request_add(rq);
- if (err || !it_src.sg || !sg_dma_len(it_src.sg))
+
+ if (err)
break;
+ if (!bytes_to_cpy && !ccs_bytes_to_cpy) {
+ if (src_is_lmem)
+ WARN_ON(it_src.sg && sg_dma_len(it_src.sg));
+ else
+ WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg));
+ break;
+ }
+
+ if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) ||
+ !it_dst.sg || !sg_dma_len(it_dst.sg) ||
+ (ccs_bytes_to_cpy && (!it_ccs.sg ||
+ !sg_dma_len(it_ccs.sg))))) {
+ err = -EINVAL;
+ break;
+ }
+
cond_resched();
} while (1);
@@ -614,35 +887,65 @@ out_ce:
return err;
}
-static int emit_clear(struct i915_request *rq, u64 offset, int size, u32 value)
+static int emit_clear(struct i915_request *rq, u32 offset, int size,
+ u32 value, bool is_lmem)
{
- const int ver = GRAPHICS_VER(rq->engine->i915);
+ struct drm_i915_private *i915 = rq->engine->i915;
+ int mocs = rq->engine->gt->mocs.uc_index << 1;
+ const int ver = GRAPHICS_VER(i915);
+ int ring_sz;
u32 *cs;
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- offset += (u64)rq->engine->instance << 32;
+ if (HAS_FLAT_CCS(i915) && ver >= 12)
+ ring_sz = XY_FAST_COLOR_BLT_DW;
+ else if (ver >= 8)
+ ring_sz = 8;
+ else
+ ring_sz = 6;
- cs = intel_ring_begin(rq, ver >= 8 ? 8 : 6);
+ cs = intel_ring_begin(rq, ring_sz);
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (ver >= 8) {
+ if (HAS_FLAT_CCS(i915) && ver >= 12) {
+ *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
+ (XY_FAST_COLOR_BLT_DW - 2);
+ *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
+ (PAGE_SIZE - 1);
+ *cs++ = 0;
+ *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = offset;
+ *cs++ = rq->engine->instance;
+ *cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
+ /* BG7 */
+ *cs++ = value;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ /* BG11 */
+ *cs++ = 0;
+ *cs++ = 0;
+ /* BG13 */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ } else if (ver >= 8) {
*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
+ *cs++ = offset;
+ *cs++ = rq->engine->instance;
*cs++ = value;
*cs++ = MI_NOOP;
} else {
- GEM_BUG_ON(upper_32_bits(offset));
*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = lower_32_bits(offset);
+ *cs++ = offset;
*cs++ = value;
}
@@ -659,8 +962,10 @@ intel_context_migrate_clear(struct intel_context *ce,
u32 value,
struct i915_request **out)
{
+ struct drm_i915_private *i915 = ce->engine->i915;
struct sgt_dma it = sg_sgt(sg);
struct i915_request *rq;
+ u32 offset;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -668,8 +973,11 @@ intel_context_migrate_clear(struct intel_context *ce,
GEM_BUG_ON(ce->ring->size < SZ_64K);
+ offset = 0;
+ if (HAS_64K_PAGES(i915) && is_lmem)
+ offset = CHUNK_SZ;
+
do {
- u32 offset;
int len;
rq = i915_request_create(ce);
@@ -697,10 +1005,6 @@ intel_context_migrate_clear(struct intel_context *ce,
if (err)
goto out_rq;
- offset = 0;
- if (HAS_64K_PAGES(ce->engine->i915) && is_lmem)
- offset = CHUNK_SZ;
-
len = emit_pte(rq, &it, cache_level, is_lmem, offset, CHUNK_SZ);
if (len <= 0) {
err = len;
@@ -711,7 +1015,22 @@ intel_context_migrate_clear(struct intel_context *ce,
if (err)
goto out_rq;
- err = emit_clear(rq, offset, len, value);
+ err = emit_clear(rq, offset, len, value, is_lmem);
+ if (err)
+ goto out_rq;
+
+ if (HAS_FLAT_CCS(i915) && is_lmem && !value) {
+ /*
+ * Copy the content of memory into the corresponding
+ * CCS surface
+ */
+ err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
+ DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
/* Arbitration is re-enabled between requests. */
out_rq:
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index d91e2beb7517..d8b94d638559 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -91,7 +91,7 @@ write_dma_entry(struct drm_i915_gem_object * const pdma,
u64 * const vaddr = __px_vaddr(pdma);
vaddr[idx] = encoded_entry;
- clflush_cache_range(&vaddr[idx], sizeof(u64));
+ drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
}
void
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 6df359c534fe..b4770690e794 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -4,7 +4,9 @@
*/
#include <linux/pm_runtime.h>
+#include <linux/string_helpers.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
@@ -324,9 +326,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
resource_size_t pcbr_offset;
pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
- pctx = i915_gem_object_create_stolen_for_preallocated(i915,
- pcbr_offset,
- pctx_size);
+ pctx = i915_gem_object_create_region_at(i915->mm.stolen_region,
+ pcbr_offset,
+ pctx_size,
+ 0);
if (IS_ERR(pctx))
return PTR_ERR(pctx);
@@ -430,8 +433,8 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
drm_dbg(&i915->drm, "BIOS enabled RC states: "
"HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
- onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
- onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+ str_on_off(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+ str_on_off(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
rc_sw_target);
if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 6cecfdae07ad..f5111c0a0060 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -93,6 +93,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
struct intel_memory_region *mem;
resource_size_t min_page_size;
resource_size_t io_start;
+ resource_size_t io_size;
resource_size_t lmem_size;
int err;
@@ -122,9 +123,14 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
}
+ if (i915->params.lmem_size > 0) {
+ lmem_size = min_t(resource_size_t, lmem_size,
+ mul_u32_u32(i915->params.lmem_size, SZ_1M));
+ }
io_start = pci_resource_start(pdev, 2);
- if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
+ io_size = min(pci_resource_len(pdev, 2), lmem_size);
+ if (!io_size)
return ERR_PTR(-ENODEV);
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
@@ -134,7 +140,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size,
min_page_size,
io_start,
- lmem_size,
+ io_size,
INTEL_MEMORY_LOCAL,
0,
&intel_region_lmem_ops);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index b7c6d4462ec5..a5338c3fde7a 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -5,6 +5,7 @@
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
+#include <linux/string_helpers.h>
#include "display/intel_display.h"
#include "display/intel_overlay.h"
@@ -137,7 +138,7 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
{
bool banned = false;
- RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
+ RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
GEM_BUG_ON(__i915_request_is_complete(rq));
rcu_read_lock(); /* protect the GEM context */
@@ -771,14 +772,15 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
+ /* For GuC mode, ensure submission is disabled before stopping ring */
+ intel_uc_reset_prepare(&gt->uc);
+
for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
}
- intel_uc_reset_prepare(&gt->uc);
-
return awake;
}
@@ -1318,7 +1320,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
engine_mask &= gt->info.engine_mask;
if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(gt, engine_mask);
+ i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
intel_gt_clear_error_registers(gt, engine_mask);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 6d7ec3bf1f32..5423bfd301ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -767,7 +767,7 @@ static int mi_set_context(struct i915_request *rq,
if (GRAPHICS_VER(i915) == 7) {
if (num_engines) {
struct intel_engine_cs *signaller;
- i915_reg_t last_reg = {}; /* keep gcc quiet */
+ i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, engine->gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index c8124101aada..3476a11f294c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -772,7 +774,8 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
- GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));
+ GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n",
+ str_yes_no(interactive));
mutex_lock(&rps->power.mutex);
if (interactive) {
@@ -1067,23 +1070,66 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
return 0;
}
-static void gen6_rps_init(struct intel_rps *rps)
+static u32 intel_rps_read_state_cap(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 rp_state_cap = intel_rps_read_state_cap(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
- /* All of these values are in units of 50MHz */
+ if (IS_XEHPSDV(i915))
+ return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
+ else if (IS_GEN9_LP(i915))
+ return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ else
+ return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+}
+
+/**
+ * gen6_rps_get_freq_caps - Get freq caps exposed by HW
+ * @rps: the intel_rps structure
+ * @caps: returned freq caps
+ *
+ * Returned "caps" frequencies should be converted to MHz using
+ * intel_gpu_freq()
+ */
+void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 rp_state_cap;
+
+ rp_state_cap = intel_rps_read_state_cap(rps);
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_GEN9_LP(i915)) {
- rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 0) & 0xff;
+ caps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ caps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
- rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 16) & 0xff;
+ caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ caps->min_freq = (rp_state_cap >> 16) & 0xff;
+ }
+
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
+ /*
+ * In this case the rp_state_cap register reports frequencies
+ * in units of 50 MHz. Convert these to the actual "hw unit",
+ * i.e. units of 16.67 MHz.
+ */
+ caps->rp0_freq *= GEN9_FREQ_SCALER;
+ caps->rp1_freq *= GEN9_FREQ_SCALER;
+ caps->min_freq *= GEN9_FREQ_SCALER;
}
+}
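A worked example of the scaling above, with hypothetical register contents:

	/* Say GEN6_RP_STATE_CAP reports RP0 = 22, i.e. 22 * 50 MHz = 1100 MHz. */
	u32 rp0_raw = 22;
	u32 rp0_hw  = rp0_raw * GEN9_FREQ_SCALER;	/* 66 units of ~16.67 MHz */

	/* intel_gpu_freq(rps, rp0_hw) would then report roughly 1100 MHz. */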
+
+static void gen6_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
+ rps->rp0_freq = caps.rp0_freq;
+ rps->rp1_freq = caps.rp1_freq;
+ rps->min_freq = caps.min_freq;
/* hw_max = RP0 until we check for overclocking */
rps->max_freq = rps->rp0_freq;
@@ -1092,26 +1138,18 @@ static void gen6_rps_init(struct intel_rps *rps)
if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
u32 ddcc_status = 0;
+ u32 mult = 1;
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
+ mult = GEN9_FREQ_SCALER;
if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status, NULL) == 0)
rps->efficient_freq =
- clamp_t(u8,
- (ddcc_status >> 8) & 0xff,
+ clamp_t(u32,
+ ((ddcc_status >> 8) & 0xff) * mult,
rps->min_freq,
rps->max_freq);
}
-
- if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
- /* Store the frequency values in 16.66 MHZ units, which is
- * the natural hardware unit for SKL
- */
- rps->rp0_freq *= GEN9_FREQ_SCALER;
- rps->rp1_freq *= GEN9_FREQ_SCALER;
- rps->min_freq *= GEN9_FREQ_SCALER;
- rps->max_freq *= GEN9_FREQ_SCALER;
- rps->efficient_freq *= GEN9_FREQ_SCALER;
- }
}
static bool rps_reset(struct intel_rps *rps)
@@ -1279,7 +1317,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
- drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n",
+ str_yes_no(val & GPLLENABLE));
drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
@@ -1380,7 +1419,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
- drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n",
+ str_yes_no(val & GPLLENABLE));
drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
@@ -1772,7 +1812,7 @@ static void rps_work(struct work_struct *work)
GT_TRACE(gt,
"pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
- pm_iir, yesno(client_boost),
+ pm_iir, str_yes_no(client_boost),
adj, new_freq, min, max);
if (client_boost && new_freq < rps->boost_freq) {
@@ -2214,19 +2254,6 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
return set_min_freq(rps, val);
}
-u32 intel_rps_read_state_cap(struct intel_rps *rps)
-{
- struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_uncore *uncore = rps_to_uncore(rps);
-
- if (IS_XEHPSDV(i915))
- return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
- else if (IS_GEN9_LP(i915))
- return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
- else
- return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
-}
-
static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
@@ -2239,18 +2266,18 @@ static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
void intel_rps_raise_unslice(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
- u32 rp0_unslice_req;
mutex_lock(&rps->lock);
if (rps_uses_slpc(rps)) {
/* RP limits have not been initialized yet for SLPC path */
- rp0_unslice_req = ((intel_rps_read_state_cap(rps) >> 0)
- & 0xff) * GEN9_FREQ_SCALER;
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
intel_rps_set_manual(rps, true);
intel_uncore_write(uncore, GEN6_RPNSWREQ,
- ((rp0_unslice_req <<
+ ((caps.rp0_freq <<
GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
GEN9_IGNORE_SLICE_RATIO));
intel_rps_set_manual(rps, false);
@@ -2264,18 +2291,18 @@ void intel_rps_raise_unslice(struct intel_rps *rps)
void intel_rps_lower_unslice(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
- u32 rpn_unslice_req;
mutex_lock(&rps->lock);
if (rps_uses_slpc(rps)) {
/* RP limits have not been initialized yet for SLPC path */
- rpn_unslice_req = ((intel_rps_read_state_cap(rps) >> 16)
- & 0xff) * GEN9_FREQ_SCALER;
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
intel_rps_set_manual(rps, true);
intel_uncore_write(uncore, GEN6_RPNSWREQ,
- ((rpn_unslice_req <<
+ ((caps.min_freq <<
GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
GEN9_IGNORE_SLICE_RATIO));
intel_rps_set_manual(rps, false);
@@ -2286,6 +2313,24 @@ void intel_rps_lower_unslice(struct intel_rps *rps)
mutex_unlock(&rps->lock);
}
+static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ val = intel_uncore_read(gt->uncore, reg32);
+
+ return val;
+}
+
+bool rps_read_mask_mmio(struct intel_rps *rps,
+ i915_reg_t reg32, u32 mask)
+{
+ return rps_read_mmio(rps, reg32) & mask;
+}
+
/* External interface for intel_ips.ko */
static struct drm_i915_private __rcu *ips_mchdev;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index c6d76a3d1331..1e8d56491308 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -7,6 +7,7 @@
#define INTEL_RPS_H
#include "intel_rps_types.h"
+#include "i915_reg_defs.h"
struct i915_request;
@@ -44,10 +45,13 @@ u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
u32 intel_rps_read_punit_req(struct intel_rps *rps);
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
-u32 intel_rps_read_state_cap(struct intel_rps *rps);
+void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps);
void intel_rps_raise_unslice(struct intel_rps *rps);
void intel_rps_lower_unslice(struct intel_rps *rps);
+u32 intel_rps_read_throttle_reason(struct intel_rps *rps);
+bool rps_read_mask_mmio(struct intel_rps *rps, i915_reg_t reg32, u32 mask);
+
void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 3941d8551f52..9173ec75f2b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -37,6 +37,21 @@ enum {
INTEL_RPS_TIMER,
};
+/**
+ * struct intel_rps_freq_caps - rps freq capabilities
+ * @rp0_freq: non-overclocked max frequency
+ * @rp1_freq: "less than" RP0 power/frequency
+ * @min_freq: aka RPn, minimum frequency
+ *
+ * Freq caps exposed by HW, values are in "hw units" and intel_gpu_freq()
+ * should be used to convert to MHz
+ */
+struct intel_rps_freq_caps {
+ u8 rp0_freq;
+ u8 rp1_freq;
+ u8 min_freq;
+};
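A typical consumer of this struct (a sketch; locking and error handling elided) reads the caps once and converts them to MHz with intel_gpu_freq():

	struct intel_rps_freq_caps caps;
	u32 rp0_mhz, rpn_mhz;

	gen6_rps_get_freq_caps(rps, &caps);
	rp0_mhz = intel_gpu_freq(rps, caps.rp0_freq);	/* max non-overclocked */
	rpn_mhz = intel_gpu_freq(rps, caps.min_freq);	/* minimum (RPn) */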
+
struct intel_rps {
struct mutex lock; /* protects enabling and the worker */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 4ac0bbaf0c31..fdd25691beda 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include "i915_drv.h"
#include "intel_engine_regs.h"
#include "intel_gt_regs.h"
@@ -33,8 +35,8 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
}
static u32
-_intel_sseu_get_subslices(const struct sseu_dev_info *sseu,
- const u8 *subslice_mask, u8 slice)
+sseu_get_subslices(const struct sseu_dev_info *sseu,
+ const u8 *subslice_mask, u8 slice)
{
int i, offset = slice * sseu->ss_stride;
u32 mask = 0;
@@ -49,12 +51,17 @@ _intel_sseu_get_subslices(const struct sseu_dev_info *sseu,
u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
{
- return _intel_sseu_get_subslices(sseu, sseu->subslice_mask, slice);
+ return sseu_get_subslices(sseu, sseu->subslice_mask, slice);
+}
+
+static u32 sseu_get_geometry_subslices(const struct sseu_dev_info *sseu)
+{
+ return sseu_get_subslices(sseu, sseu->geometry_subslice_mask, 0);
}
u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu)
{
- return _intel_sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0);
+ return sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0);
}
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
@@ -711,22 +718,18 @@ void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
drm_printf(p, "has slice power gating: %s\n",
- yesno(sseu->has_slice_pg));
+ str_yes_no(sseu->has_slice_pg));
drm_printf(p, "has subslice power gating: %s\n",
- yesno(sseu->has_subslice_pg));
- drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+ str_yes_no(sseu->has_subslice_pg));
+ drm_printf(p, "has EU power gating: %s\n",
+ str_yes_no(sseu->has_eu_pg));
}
-void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p)
+static void sseu_print_hsw_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
{
int s, ss;
- if (sseu->max_slices == 0) {
- drm_printf(p, "Unavailable\n");
- return;
- }
-
for (s = 0; s < sseu->max_slices; s++) {
drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
s, intel_sseu_subslices_per_slice(sseu, s),
@@ -741,6 +744,36 @@ void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
}
}
+static void sseu_print_xehp_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ u32 g_dss_mask = sseu_get_geometry_subslices(sseu);
+ u32 c_dss_mask = intel_sseu_get_compute_subslices(sseu);
+ int dss;
+
+ for (dss = 0; dss < sseu->max_subslices; dss++) {
+ u16 enabled_eus = sseu_get_eus(sseu, 0, dss);
+
+ drm_printf(p, "DSS_%02d: G:%3s C:%3s, %2u EUs (0x%04hx)\n", dss,
+ str_yes_no(g_dss_mask & BIT(dss)),
+ str_yes_no(c_dss_mask & BIT(dss)),
+ hweight16(enabled_eus), enabled_eus);
+ }
+}
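With illustrative fuse values, one line of the resulting debugfs output would look like:

	DSS_00: G:yes C: no, 16 EUs (0xffff)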
+
+void intel_sseu_print_topology(struct drm_i915_private *i915,
+ const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ if (sseu->max_slices == 0) {
+ drm_printf(p, "Unavailable\n");
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ sseu_print_xehp_topology(sseu, p);
+ } else {
+ sseu_print_hsw_topology(sseu, p);
+ }
+}
+
u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice)
{
u16 slice_mask = 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 8a79cd8eaab4..5c078df4729c 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -15,26 +15,49 @@ struct drm_i915_private;
struct intel_gt;
struct drm_printer;
-#define GEN_MAX_SLICES (3) /* SKL upper bound */
-#define GEN_MAX_SUBSLICES (32) /* XEHPSDV upper bound */
-#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
-#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES)
-#define GEN_MAX_EUS (16) /* TGL upper bound */
-#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS)
+/*
+ * Maximum number of slices on older platforms. Slices no longer exist
+ * starting on Xe_HP ("gslices," "cslices," etc. are a different concept and
+ * are not expressed through fusing).
+ */
+#define GEN_MAX_HSW_SLICES 3
+
+/*
+ * Maximum number of subslices that can exist within a HSW-style slice. This
+ * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
+ * GEN_MAX_DSS value below).
+ */
+#define GEN_MAX_SS_PER_HSW_SLICE 6
+
+/* Maximum number of DSS on newer platforms (Xe_HP and beyond). */
+#define GEN_MAX_DSS 32
+
+/* Maximum number of EUs that can exist within a subslice or DSS. */
+#define GEN_MAX_EUS_PER_SS 16
+
+#define SSEU_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* The maximum number of bits needed to express each subslice/DSS independently */
+#define GEN_SS_MASK_SIZE SSEU_MAX(GEN_MAX_DSS, \
+ GEN_MAX_HSW_SLICES * GEN_MAX_SS_PER_HSW_SLICE)
+
+#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
+#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_SS_MASK_SIZE)
+#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS_PER_SS)
#define GEN_DSS_PER_GSLICE 4
#define GEN_DSS_PER_CSLICE 8
#define GEN_DSS_PER_MSLICE 8
-#define GEN_MAX_GSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_GSLICE)
-#define GEN_MAX_CSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_CSLICE)
+#define GEN_MAX_GSLICES (GEN_MAX_DSS / GEN_DSS_PER_GSLICE)
+#define GEN_MAX_CSLICES (GEN_MAX_DSS / GEN_DSS_PER_CSLICE)
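Plugging the definitions above into each other: GEN_SS_MASK_SIZE = max(32, 3 * 6) = 32, so each subslice mask needs GEN_SSEU_STRIDE(32) = 4 bytes, and with GEN_MAX_EUS_PER_SS = 16 each subslice's EU mask needs GEN_SSEU_STRIDE(16) = 2 bytes; the eu_mask array below is therefore 32 * 2 = 64 bytes.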
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
- u8 geometry_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
- u8 compute_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
- u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE];
+ u8 subslice_mask[GEN_SS_MASK_SIZE];
+ u8 geometry_subslice_mask[GEN_SS_MASK_SIZE];
+ u8 compute_subslice_mask[GEN_SS_MASK_SIZE];
+ u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -116,7 +139,8 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
const struct intel_sseu *req_sseu);
void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
-void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+void intel_sseu_print_topology(struct drm_i915_private *i915,
+ const struct sseu_dev_info *sseu,
struct drm_printer *p);
u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
index 903626f106ea..2d5d011e01db 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -4,6 +4,8 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include "i915_drv.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_regs.h"
@@ -226,16 +228,16 @@ static void i915_print_sseu_info(struct seq_file *m,
if (!is_available_info)
return;
- seq_printf(m, " Has Pooled EU: %s\n", yesno(has_pooled_eu));
+ seq_printf(m, " Has Pooled EU: %s\n", str_yes_no(has_pooled_eu));
if (has_pooled_eu)
seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
seq_printf(m, " Has Slice Power Gating: %s\n",
- yesno(sseu->has_slice_pg));
+ str_yes_no(sseu->has_slice_pg));
seq_printf(m, " Has Subslice Power Gating: %s\n",
- yesno(sseu->has_subslice_pg));
+ str_yes_no(sseu->has_subslice_pg));
seq_printf(m, " Has EU Power Gating: %s\n",
- yesno(sseu->has_eu_pg));
+ str_yes_no(sseu->has_eu_pg));
}
/*
@@ -246,7 +248,7 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
const struct intel_gt_info *info = &gt->info;
- struct sseu_dev_info sseu;
+ struct sseu_dev_info *sseu;
intel_wakeref_t wakeref;
if (GRAPHICS_VER(i915) < 8)
@@ -256,23 +258,29 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);
seq_puts(m, "SSEU Device Status\n");
- memset(&sseu, 0, sizeof(sseu));
- intel_sseu_set_info(&sseu, info->sseu.max_slices,
+
+ sseu = kzalloc(sizeof(*sseu), GFP_KERNEL);
+ if (!sseu)
+ return -ENOMEM;
+
+ intel_sseu_set_info(sseu, info->sseu.max_slices,
info->sseu.max_subslices,
info->sseu.max_eus_per_subslice);
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(i915))
- cherryview_sseu_device_status(gt, &sseu);
+ cherryview_sseu_device_status(gt, sseu);
else if (IS_BROADWELL(i915))
- bdw_sseu_device_status(gt, &sseu);
+ bdw_sseu_device_status(gt, sseu);
else if (GRAPHICS_VER(i915) == 9)
- gen9_sseu_device_status(gt, &sseu);
+ gen9_sseu_device_status(gt, sseu);
else if (GRAPHICS_VER(i915) >= 11)
- gen11_sseu_device_status(gt, &sseu);
+ gen11_sseu_device_status(gt, sseu);
}
- i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu);
+ i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), sseu);
+
+ kfree(sseu);
return 0;
}
@@ -285,22 +293,22 @@ static int sseu_status_show(struct seq_file *m, void *unused)
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status);
-static int rcs_topology_show(struct seq_file *m, void *unused)
+static int sseu_topology_show(struct seq_file *m, void *unused)
{
struct intel_gt *gt = m->private;
struct drm_printer p = drm_seq_file_printer(m);
- intel_sseu_print_topology(&gt->info.sseu, &p);
+ intel_sseu_print_topology(gt->i915, &gt->info.sseu, &p);
return 0;
}
-DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_topology);
void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "sseu_status", &sseu_status_fops, NULL },
- { "rcs_topology", &rcs_topology_fops, NULL },
+ { "sseu_topology", &sseu_topology_fops, NULL },
};
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index c014b40d2e9f..a05c4b99b3fb 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1072,9 +1072,15 @@ static void __set_mcr_steering(struct i915_wa_list *wal,
static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
unsigned int slice, unsigned int subslice)
{
- drm_dbg(&gt->i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice);
+ struct drm_printer p = drm_debug_printer("MCR Steering:");
__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
+
+ gt->default_steering.groupid = slice;
+ gt->default_steering.instanceid = subslice;
+
+ if (drm_debug_enabled(DRM_UT_DRIVER))
+ intel_gt_report_steering(&p, gt, false);
}
static void
@@ -2188,11 +2194,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+ }
+ if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) ||
+ IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
* Wa_1606700617:tgl,dg1,adl-p
* Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
* Wa_14010826681:tgl,dg1,rkl,adl-p
+ * Wa_18019627453:dg2
*/
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
@@ -2310,7 +2320,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
FF_DOP_CLOCK_GATE_DISABLE);
}
- if (IS_GRAPHICS_VER(i915, 9, 12)) {
+ if (HAS_PERCTX_PREEMPT_CTRL(i915)) {
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
@@ -2618,6 +2628,11 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE);
}
+
+ if (IS_DG2(i915)) {
+ /* Wa_22014226127:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+ }
}
static void
@@ -2633,7 +2648,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
- if (engine->class == RENDER_CLASS)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
general_render_compute_wa_init(engine, wal);
if (engine->class == RENDER_CLASS)
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 72d5faab8f9a..09f8cd2d0e2c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -1736,15 +1736,9 @@ static int live_preempt(void *arg)
enum intel_engine_id id;
int err = -ENOMEM;
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
- goto err_spin_lo;
+ return -ENOMEM;
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
@@ -1752,6 +1746,12 @@ static int live_preempt(void *arg)
goto err_ctx_hi;
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ if (igt_spinner_init(&spin_hi, gt))
+ goto err_ctx_lo;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
@@ -1761,14 +1761,14 @@ static int live_preempt(void *arg)
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
@@ -1777,7 +1777,7 @@ static int live_preempt(void *arg)
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
rq = spinner_create_request(&spin_hi, ctx_hi, engine,
@@ -1785,7 +1785,7 @@ static int live_preempt(void *arg)
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
@@ -1794,7 +1794,7 @@ static int live_preempt(void *arg)
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
igt_spinner_end(&spin_hi);
@@ -1802,19 +1802,19 @@ static int live_preempt(void *arg)
if (igt_live_test_end(&t)) {
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
}
err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
return err;
}
@@ -1828,20 +1828,20 @@ static int live_late_preempt(void *arg)
enum intel_engine_id id;
int err = -ENOMEM;
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
- goto err_spin_lo;
+ return -ENOMEM;
ctx_lo = kernel_context(gt->i915, NULL);
if (!ctx_lo)
goto err_ctx_hi;
+ if (igt_spinner_init(&spin_hi, gt))
+ goto err_ctx_lo;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
ctx_lo->sched.priority = 1;
@@ -1854,14 +1854,14 @@ static int live_late_preempt(void *arg)
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
@@ -1875,7 +1875,7 @@ static int live_late_preempt(void *arg)
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
@@ -1898,19 +1898,19 @@ static int live_late_preempt(void *arg)
if (igt_live_test_end(&t)) {
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
}
err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
return err;
err_wedged:
@@ -1918,7 +1918,7 @@ err_wedged:
igt_spinner_end(&spin_lo);
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
struct preempt_client {
@@ -3382,12 +3382,9 @@ static int live_preempt_timeout(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (igt_spinner_init(&spin_lo, gt))
- return -ENOMEM;
-
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
- goto err_spin_lo;
+ return -ENOMEM;
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
@@ -3395,6 +3392,9 @@ static int live_preempt_timeout(void *arg)
goto err_ctx_hi;
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_ctx_lo;
+
for_each_engine(engine, gt, id) {
unsigned long saved_timeout;
struct i915_request *rq;
@@ -3406,21 +3406,21 @@ static int live_preempt_timeout(void *arg)
MI_NOOP); /* preemption disabled */
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_lo, rq)) {
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
rq = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
/* Flush the previous CS ack before changing timeouts */
@@ -3440,7 +3440,7 @@ static int live_preempt_timeout(void *arg)
intel_gt_set_wedged(gt);
i915_request_put(rq);
err = -ETIME;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
igt_spinner_end(&spin_lo);
@@ -3448,12 +3448,12 @@ static int live_preempt_timeout(void *arg)
}
err = 0;
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 21c29d315cc0..8b2c11dbe354 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -128,6 +128,27 @@ static int context_flush(struct intel_context *ce, long timeout)
return err;
}
+static int get_lri_mask(struct intel_engine_cs *engine, u32 lri)
+{
+ if ((lri & MI_LRI_LRM_CS_MMIO) == 0)
+ return ~0u;
+
+ if (GRAPHICS_VER(engine->i915) < 12)
+ return 0xfff;
+
+ switch (engine->class) {
+ default:
+ case RENDER_CLASS:
+ case COMPUTE_CLASS:
+ return 0x07ff;
+ case COPY_ENGINE_CLASS:
+ return 0x0fff;
+ case VIDEO_DECODE_CLASS:
+ case VIDEO_ENHANCEMENT_CLASS:
+ return 0x3fff;
+ }
+}
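Put differently (a reading of the masks above, not wording from the patch): with MI_LRI_LRM_CS_MMIO the register offsets are engine-relative, so only the low bits identify a register, and the layout check below compares offsets modulo the mask — e.g. on a Gen12+ copy engine only bits 0-11 of each LRI offset are compared against the default context image.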
+
static int live_lrc_layout(void *arg)
{
struct intel_gt *gt = arg;
@@ -167,6 +188,7 @@ static int live_lrc_layout(void *arg)
dw = 0;
do {
u32 lri = READ_ONCE(hw[dw]);
+ u32 lri_mask;
if (lri == 0) {
dw++;
@@ -194,6 +216,18 @@ static int live_lrc_layout(void *arg)
break;
}
+ /*
+ * When bit 19 of the MI_LOAD_REGISTER_IMM instruction
+ * opcode is set on Gen12+ devices, the HW does not
+ * care about certain register address offsets, and
+ * instead checks the following for valid address
+ * ranges on specific engines:
+ * RCS && CCS: BITS(0 - 10)
+ * BCS: BITS(0 - 11)
+ * VECS && VCS: BITS(0 - 13)
+ */
+ lri_mask = get_lri_mask(engine, lri);
+
lri &= 0x7f;
lri++;
dw++;
@@ -201,7 +235,7 @@ static int live_lrc_layout(void *arg)
while (lri) {
u32 offset = READ_ONCE(hw[dw]);
- if (offset != lrc[dw]) {
+ if ((offset ^ lrc[dw]) & lri_mask) {
pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
engine->name, dw, offset, lrc[dw]);
err = -EINVAL;
@@ -911,6 +945,19 @@ create_user_vma(struct i915_address_space *vm, unsigned long size)
return vma;
}
+static u32 safe_poison(u32 offset, u32 poison)
+{
+ /*
+ * Do not enable predication as it will nop all subsequent commands,
+ * not only disabling the tests (by preventing all the other SRM) but
+ * also preventing the arbitration events at the end of the request.
+ */
+ if (offset == i915_mmio_reg_offset(RING_PREDICATE_RESULT(0)))
+ poison &= ~REG_BIT(0);
+
+ return poison;
+}
+
static struct i915_vma *
store_context(struct intel_context *ce, struct i915_vma *scratch)
{
@@ -1120,7 +1167,9 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
*cs++ = MI_LOAD_REGISTER_IMM(len);
while (len--) {
*cs++ = hw[dw];
- *cs++ = poison;
+ *cs++ = safe_poison(hw[dw] & get_lri_mask(ce->engine,
+ MI_LRI_LRM_CS_MMIO),
+ poison);
dw += 2;
}
} while (dw < PAGE_SIZE / sizeof(u32) &&
@@ -1753,8 +1802,8 @@ static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
if (IS_ERR(ce))
return PTR_ERR(ce);
- ce->runtime.num_underflow = 0;
- ce->runtime.max_underflow = 0;
+ ce->stats.runtime.num_underflow = 0;
+ ce->stats.runtime.max_underflow = 0;
do {
unsigned int loop = 1024;
@@ -1792,11 +1841,11 @@ static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
intel_context_get_avg_runtime_ns(ce));
err = 0;
- if (ce->runtime.num_underflow) {
+ if (ce->stats.runtime.num_underflow) {
pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
engine->name,
- ce->runtime.num_underflow,
- ce->runtime.max_underflow);
+ ce->stats.runtime.num_underflow,
+ ce->stats.runtime.max_underflow);
GEM_TRACE_DUMP();
err = -EOVERFLOW;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index c9c4f391c5cc..2b0c87999949 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -132,6 +132,124 @@ err_free_src:
return err;
}
+static int intel_context_copy_ccs(struct intel_context *ce,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool write_to_ccs,
+ struct i915_request **out)
+{
+ u8 src_access = write_to_ccs ? DIRECT_ACCESS : INDIRECT_ACCESS;
+ u8 dst_access = write_to_ccs ? INDIRECT_ACCESS : DIRECT_ACCESS;
+ struct sgt_dma it = sg_sgt(sg);
+ struct i915_request *rq;
+ u32 offset;
+ int err;
+
+ GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+ *out = NULL;
+
+ GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+ offset = 0;
+ if (HAS_64K_PAGES(ce->engine->i915))
+ offset = CHUNK_SZ;
+
+ do {
+ int len;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
+ if (err)
+ goto out_rq;
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ deps = NULL;
+ }
+
+ /* The PTE updates + clear must not be interrupted. */
+ err = emit_no_arbitration(rq);
+ if (err)
+ goto out_rq;
+
+ len = emit_pte(rq, &it, cache_level, true, offset, CHUNK_SZ);
+ if (len <= 0) {
+ err = len;
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ err = emit_copy_ccs(rq, offset, dst_access,
+ offset, src_access, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+
+ /* Arbitration is re-enabled between requests. */
+out_rq:
+ if (*out)
+ i915_request_put(*out);
+ *out = i915_request_get(rq);
+ i915_request_add(rq);
+ if (err || !it.sg || !sg_dma_len(it.sg))
+ break;
+
+ cond_resched();
+ } while (1);
+
+out_ce:
+ return err;
+}
+
+static int
+intel_migrate_ccs_copy(struct intel_migrate *m,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool write_to_ccs,
+ struct i915_request **out)
+{
+ struct intel_context *ce;
+ int err;
+
+ *out = NULL;
+ if (!m->context)
+ return -ENODEV;
+
+ ce = intel_migrate_create_context(m);
+ if (IS_ERR(ce))
+ ce = intel_context_get(m->context);
+ GEM_BUG_ON(IS_ERR(ce));
+
+ err = intel_context_pin_ww(ce, ww);
+ if (err)
+ goto out;
+
+ err = intel_context_copy_ccs(ce, deps, sg, cache_level,
+ write_to_ccs, out);
+
+ intel_context_unpin(ce);
+out:
+ intel_context_put(ce);
+ return err;
+}
+
static int clear(struct intel_migrate *migrate,
int (*fn)(struct intel_migrate *migrate,
struct i915_gem_ww_ctx *ww,
@@ -144,7 +262,8 @@ static int clear(struct intel_migrate *migrate,
struct drm_i915_gem_object *obj;
struct i915_request *rq;
struct i915_gem_ww_ctx ww;
- u32 *vaddr;
+ u32 *vaddr, val = 0;
+ bool ccs_cap = false;
int err = 0;
int i;
@@ -152,7 +271,15 @@ static int clear(struct intel_migrate *migrate,
if (IS_ERR(obj))
return 0;
+ /* Consider the rounded-up memory too */
+ sz = obj->base.size;
+
+ if (HAS_FLAT_CCS(i915) && i915_gem_object_is_lmem(obj))
+ ccs_cap = true;
+
for_i915_gem_ww(&ww, err, true) {
+ int ccs_bytes, ccs_bytes_per_chunk;
+
err = i915_gem_object_lock(obj, &ww);
if (err)
continue;
@@ -167,44 +294,114 @@ static int clear(struct intel_migrate *migrate,
vaddr[i] = ~i;
i915_gem_object_flush_map(obj);
- err = fn(migrate, &ww, obj, sz, &rq);
- if (!err)
- continue;
+ if (ccs_cap && !val) {
+ /* Write the obj data into ccs surface */
+ err = intel_migrate_ccs_copy(migrate, &ww, NULL,
+ obj->mm.pages->sgl,
+ obj->cache_level,
+ true, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n",
+ fn, sz);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+ rq = NULL;
+ }
+ if (err)
+ continue;
+ }
- if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
- pr_err("%ps failed, size: %u\n", fn, sz);
- if (rq) {
- i915_request_wait(rq, 0, HZ);
+ err = fn(migrate, &ww, obj, val, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n", fn, sz);
+ err = -ETIME;
+ }
i915_request_put(rq);
+ rq = NULL;
}
- i915_gem_object_unpin_map(obj);
- }
- if (err)
- goto err_out;
+ if (err)
+ continue;
- if (rq) {
- if (i915_request_wait(rq, 0, HZ) < 0) {
- pr_err("%ps timed out, size: %u\n", fn, sz);
- err = -ETIME;
+ i915_gem_object_flush_map(obj);
+
+ /* Verify the set/clear of the obj mem */
+ for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
+ int x = i * 1024 +
+ i915_prandom_u32_max_state(1024, prng);
+
+ if (vaddr[x] != val) {
+ pr_err("%ps failed, (%u != %u), offset: %zu\n",
+ fn, vaddr[x], val, x * sizeof(u32));
+ igt_hexdump(vaddr + i * 1024, 4096);
+ err = -EINVAL;
+ }
}
- i915_request_put(rq);
- }
+ if (err)
+ continue;
- for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
- int x = i * 1024 + i915_prandom_u32_max_state(1024, prng);
+ if (ccs_cap && !val) {
+ for (i = 0; i < sz / sizeof(u32); i++)
+ vaddr[i] = ~i;
+ i915_gem_object_flush_map(obj);
+
+ err = intel_migrate_ccs_copy(migrate, &ww, NULL,
+ obj->mm.pages->sgl,
+ obj->cache_level,
+ false, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n",
+ fn, sz);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+ rq = NULL;
+ }
+ if (err)
+ continue;
+
+ ccs_bytes = GET_CCS_BYTES(i915, sz);
+ ccs_bytes_per_chunk = GET_CCS_BYTES(i915, CHUNK_SZ);
+ i915_gem_object_flush_map(obj);
+
+ for (i = 0; !err && i < DIV_ROUND_UP(ccs_bytes, PAGE_SIZE); i++) {
+ int offset = ((i * PAGE_SIZE) /
+ ccs_bytes_per_chunk) * CHUNK_SZ / sizeof(u32);
+ int ccs_bytes_left = (ccs_bytes - i * PAGE_SIZE) / sizeof(u32);
+ int x = i915_prandom_u32_max_state(min_t(int, 1024,
+ ccs_bytes_left), prng);
+
+ if (vaddr[offset + x]) {
+ pr_err("%ps ccs clearing failed, offset: %ld/%d\n",
+ fn, i * PAGE_SIZE + x * sizeof(u32), ccs_bytes);
+ igt_hexdump(vaddr + offset,
+ min_t(int, 4096,
+ ccs_bytes_left * sizeof(u32)));
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ continue;
+ }
+ i915_gem_object_unpin_map(obj);
+ }
- if (vaddr[x] != sz) {
- pr_err("%ps failed, size: %u, offset: %zu\n",
- fn, sz, x * sizeof(u32));
- igt_hexdump(vaddr + i * 1024, 4096);
- err = -EINVAL;
+ if (err) {
+ if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
+ pr_err("%ps failed, size: %u\n", fn, sz);
+ if (rq && err != -EINVAL) {
+ i915_request_wait(rq, 0, HZ);
+ i915_request_put(rq);
}
+
+ i915_gem_object_unpin_map(obj);
}
- i915_gem_object_unpin_map(obj);
-err_out:
i915_gem_object_put(obj);
-
return err;
}
@@ -621,13 +818,15 @@ static int perf_copy_blt(void *arg)
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
struct drm_i915_gem_object *src, *dst;
+ size_t sz;
int err;
src = create_init_lmem_internal(gt, sizes[i], true);
if (IS_ERR(src))
return PTR_ERR(src);
- dst = create_init_lmem_internal(gt, sizes[i], false);
+ sz = src->base.size;
+ dst = create_init_lmem_internal(gt, sz, false);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto err_src;
@@ -640,7 +839,7 @@ static int perf_copy_blt(void *arg)
dst->mm.pages->sgl,
I915_CACHE_NONE,
i915_gem_object_is_lmem(dst),
- sizes[i]);
+ sz);
i915_gem_object_unlock(dst);
i915_gem_object_put(dst);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 0410c402f2a3..522d0190509c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -4,6 +4,7 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/string_helpers.h>
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
@@ -209,7 +210,7 @@ static int __igt_sync(struct intel_timeline *tl,
if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
- name, p->name, ctx, p->seqno, yesno(p->expected));
+ name, p->name, ctx, p->seqno, str_yes_no(p->expected));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 7afdadc7656f..4ef9990ed7f8 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -50,7 +50,7 @@
#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffU << 16)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
@@ -122,17 +122,14 @@ enum intel_guc_action {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
- INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY = 0x1005,
- INTEL_GUC_ACTION_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
- INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
+ INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
INTEL_GUC_ACTION_SETUP_PC_GUCRC = 0x3004,
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+ INTEL_GUC_ACTION_GET_HWCONFIG = 0x4100,
INTEL_GUC_ACTION_REGISTER_CONTEXT = 0x4502,
INTEL_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
- INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
- INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
@@ -173,4 +170,11 @@ enum intel_guc_sleep_state_status {
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
+enum intel_guc_state_capture_event_status {
+ INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_SUCCESS = 0x0,
+ INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE = 0x1,
+};
+
+#define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
+
#endif /* _ABI_GUC_ACTIONS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
index c9086a600bce..df83c1cc7c7a 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -82,7 +82,7 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
#define GUC_CTB_HDR_LEN 1u
#define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
#define GUC_CTB_MSG_MAX_LEN 256u
-#define GUC_CTB_MSG_0_FENCE (0xffff << 16)
+#define GUC_CTB_MSG_0_FENCE (0xffffU << 16)
#define GUC_CTB_MSG_0_FORMAT (0xf << 12)
#define GUC_CTB_FORMAT_HXG 0u
#define GUC_CTB_MSG_0_RESERVED (0xf << 8)
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index c20658ee85a5..8085fb181274 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -8,6 +8,10 @@
enum intel_guc_response_status {
INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+ INTEL_GUC_RESPONSE_NOT_SUPPORTED = 0x20,
+ INTEL_GUC_RESPONSE_NO_ATTRIBUTE_TABLE = 0x201,
+ INTEL_GUC_RESPONSE_NO_DECRYPTION_KEY = 0x202,
+ INTEL_GUC_RESPONSE_DECRYPTION_FAILED = 0x204,
INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
};
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index f0814a57c191..4a59478c3b5c 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -6,6 +6,8 @@
#ifndef _ABI_GUC_KLVS_ABI_H
#define _ABI_GUC_KLVS_ABI_H
+#include <linux/types.h>
+
/**
* DOC: GuC KLV
*
@@ -79,4 +81,17 @@
#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY 0x0907
#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u
+/*
+ * Per context scheduling policy update keys.
+ */
+enum {
+ GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM = 0x2001,
+ GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT = 0x2002,
+ GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY = 0x2003,
+ GUC_CONTEXT_POLICIES_KLV_ID_PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY = 0x2004,
+ GUC_CONTEXT_POLICIES_KLV_ID_SLPM_GT_FREQUENCY = 0x2005,
+
+ GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
+};
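For context, the KLV wire format documented earlier in this header packs a 16-bit key and a 16-bit length (in dwords) into one header dword, followed by the value dwords. A hedged sketch of how one of these policy keys might be encoded (helper name hypothetical):

	/* Illustrative KLV header: key in bits 31:16, length (dwords) in 15:0. */
	static u32 guc_klv_hdr(u16 key, u16 len_dw)
	{
		return (u32)key << 16 | len_dw;
	}

	/* e.g. guc_klv_hdr(GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY, 1)
	 * followed by one value dword carrying the priority.
	 */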
+
#endif /* _ABI_GUC_KLVS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
index 29ac823acd4c..7d5ba4d97d70 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
@@ -40,7 +40,7 @@
*/
#define GUC_HXG_MSG_MIN_LEN 1u
-#define GUC_HXG_MSG_0_ORIGIN (0x1 << 31)
+#define GUC_HXG_MSG_0_ORIGIN (0x1U << 31)
#define GUC_HXG_ORIGIN_HOST 0u
#define GUC_HXG_ORIGIN_GUC 1u
#define GUC_HXG_MSG_0_TYPE (0x7 << 28)
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
new file mode 100644
index 000000000000..3624abfd22d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_CAPTURE_FWIF_H
+#define _INTEL_GUC_CAPTURE_FWIF_H
+
+#include <linux/types.h>
+#include "intel_guc_fwif.h"
+
+struct intel_guc;
+struct file;
+
+/**
+ * struct __guc_capture_bufstate
+ *
+ * Book-keeping structure used to track read and write pointers
+ * as we extract error capture data from the GuC-log-buffer's
+ * error-capture region as a stream of dwords.
+ */
+struct __guc_capture_bufstate {
+ u32 size;
+ void *data;
+ u32 rd;
+ u32 wr;
+};
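A minimal sketch of how a consumer might compute the number of readable bytes in this region, assuming rd/wr are byte offsets into a circular buffer of size bytes (an assumption for illustration; the driver's actual parser lives elsewhere):

	static u32 guc_capture_buf_count(const struct __guc_capture_bufstate *b)
	{
		if (b->wr >= b->rd)
			return b->wr - b->rd;

		return b->size - b->rd + b->wr;	/* write pointer has wrapped */
	}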
+
+/**
+ * struct __guc_capture_parsed_output - extracted error capture node
+ *
+ * A single unit of extracted error-capture output data grouped together
+ * at an engine-instance level. We keep these nodes in a linked list.
+ * See cachelist and outlist below.
+ */
+struct __guc_capture_parsed_output {
+ /*
+ * A single set of 3 capture lists: a global list,
+ * an engine-class list and an engine-instance list.
+ * outlist in __guc_capture_parsed_output will keep
+ * a linked list of these nodes that will eventually
+ * be detached from outlist and attached to
+ * i915_gpu_coredump in response to a context reset.
+ */
+ struct list_head link;
+ bool is_partial;
+ u32 eng_class;
+ u32 eng_inst;
+ u32 guc_id;
+ u32 lrca;
+ struct gcap_reg_list_info {
+ u32 vfid;
+ u32 num_regs;
+ struct guc_mmio_reg *regs;
+ } reginfo[GUC_CAPTURE_LIST_TYPE_MAX];
+#define GCAP_PARSED_REGLIST_INDEX_GLOBAL BIT(GUC_CAPTURE_LIST_TYPE_GLOBAL)
+#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS)
+#define GCAP_PARSED_REGLIST_INDEX_ENGINST BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE)
+};
+
+/**
+ * struct guc_debug_capture_list_header / struct guc_debug_capture_list
+ *
+ * As part of ADS registration, these header structures (followed by
+ * an array of 'struct guc_mmio_reg' entries) are used to register with
+ * the GuC microkernel the list of registers we want it to dump out prior
+ * to an engine reset.
+ */
+struct guc_debug_capture_list_header {
+ u32 info;
+#define GUC_CAPTURELISTHDR_NUMDESCR GENMASK(15, 0)
+} __packed;
+
+struct guc_debug_capture_list {
+ struct guc_debug_capture_list_header header;
+ struct guc_mmio_reg regs[0];
+} __packed;
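Since a registered capture list is just this header followed by 'num_regs' guc_mmio_reg entries, sizing the ADS buffer for one list reduces to the following (a sketch mirroring the computation intel_guc_capture_getlistsize() performs later in this patch):

/* num_regs: number of guc_mmio_reg entries in the list */
size_t list_size = PAGE_ALIGN(sizeof(struct guc_debug_capture_list) +
			      num_regs * sizeof(struct guc_mmio_reg));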
+
+/**
+ * struct __guc_mmio_reg_descr / struct __guc_mmio_reg_descr_group
+ *
+ * The intel_guc_capture module uses these structures to maintain static
+ * tables (per unique platform) that consist of lists of registers
+ * (offsets, names, flags, ...) used at ADS registration time as well
+ * as during runtime processing and reporting of the error-capture
+ * states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr {
+ i915_reg_t reg;
+ u32 flags;
+ u32 mask;
+ const char *regname;
+};
+
+struct __guc_mmio_reg_descr_group {
+ const struct __guc_mmio_reg_descr *list;
+ u32 num_regs;
+ u32 owner; /* see enum guc_capture_owner */
+ u32 type; /* see enum guc_capture_type */
+ u32 engine; /* as per MAX_ENGINE_CLASS */
+ struct __guc_mmio_reg_descr *extlist; /* only used for steered registers */
+};
+
+/**
+ * struct guc_state_capture_header_t / struct guc_state_capture_t /
+ * guc_state_capture_group_header_t / guc_state_capture_group_t
+ *
+ * Prior to resetting engines that have hung or faulted, the GuC microkernel
+ * reports the engine error-state (the register values that were read) by
+ * logging them into the shared GuC log buffer using this hierarchy of
+ * structures.
+ */
+struct guc_state_capture_header_t {
+ u32 owner;
+#define CAP_HDR_CAPTURE_VFID GENMASK(7, 0)
+ u32 info;
+#define CAP_HDR_CAPTURE_TYPE GENMASK(3, 0) /* see enum guc_capture_type */
+#define CAP_HDR_ENGINE_CLASS GENMASK(7, 4) /* see GUC_MAX_ENGINE_CLASSES */
+#define CAP_HDR_ENGINE_INSTANCE GENMASK(11, 8)
+ u32 lrca; /* if type-instance, LRCA (address) that hung, else set to ~0 */
+ u32 guc_id; /* if type-instance, context index of hung context, else set to ~0 */
+ u32 num_mmios;
+#define CAP_HDR_NUM_MMIOS GENMASK(9, 0)
+} __packed;
+
+struct guc_state_capture_t {
+ struct guc_state_capture_header_t header;
+ struct guc_mmio_reg mmio_entries[0];
+} __packed;
+
+enum guc_capture_group_types {
+ GUC_STATE_CAPTURE_GROUP_TYPE_FULL,
+ GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL,
+ GUC_STATE_CAPTURE_GROUP_TYPE_MAX,
+};
+
+struct guc_state_capture_group_header_t {
+ u32 owner;
+#define CAP_GRP_HDR_CAPTURE_VFID GENMASK(7, 0)
+ u32 info;
+#define CAP_GRP_HDR_NUM_CAPTURES GENMASK(7, 0)
+#define CAP_GRP_HDR_CAPTURE_TYPE GENMASK(15, 8) /* guc_capture_group_types */
+} __packed;
+
+/* this is the top level structure where an error-capture dump starts */
+struct guc_state_capture_group_t {
+ struct guc_state_capture_group_header_t grp_header;
+ struct guc_state_capture_t capture_entries[0];
+} __packed;
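When the stream is parsed back out of the log buffer, the GENMASK-based fields above are decoded with FIELD_GET(). A sketch of pulling the interesting fields from a received 'struct guc_state_capture_header_t hdr', mirroring what guc_capture_extract_reglists() does further below:

u32 datatype  = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info);
u32 engclass  = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
u32 enginst   = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
u32 num_mmios = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);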
+
+/**
+ * struct __guc_capture_ads_cache
+ *
+ * A structure to cache register lists that were populated and registered
+ * with GuC at startup during ADS registration. This allows much quicker
+ * GuC resets without re-parsing all the tables for the given gt.
+ */
+struct __guc_capture_ads_cache {
+ bool is_valid;
+ void *ptr;
+ size_t size;
+ int status;
+};
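The cache is indexed directly by owner, list-type and engine-class, so a lookup is a plain 3-D array access. A sketch of the fast path, as used by intel_guc_capture_getlistsize() later in this patch:

struct __guc_capture_ads_cache *cache =
	&gc->ads_cache[owner][type][classid];

if (cache->is_valid) {
	*size = cache->size; /* reuse the previously built list */
	return cache->status;
}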
+
+/**
+ * struct intel_guc_state_capture
+ *
+ * Internal context of the intel_guc_capture module.
+ */
+struct intel_guc_state_capture {
+ /**
+ * @reglists: static table of register lists used for error-capture state.
+ */
+ const struct __guc_mmio_reg_descr_group *reglists;
+
+ /**
+ * @extlists: allocated table of steered register lists used for error-capture state.
+ *
+ * NOTE: steered registers have multiple instances depending on the HW configuration
+ * (slices or dual-sub-slices) and thus depend on HW fuses discovered at startup
+ */
+ struct __guc_mmio_reg_descr_group *extlists;
+
+ /**
+ * @ads_cache: cached register lists that are ADS-format ready
+ */
+ struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX]
+ [GUC_CAPTURE_LIST_TYPE_MAX]
+ [GUC_MAX_ENGINE_CLASSES];
+ void *ads_null_cache;
+
+ /**
+ * @cachelist: Pool of pre-allocated nodes for error capture output
+ *
+ * We need this pool of pre-allocated nodes because we cannot
+ * dynamically allocate new nodes when receiving a G2H notification:
+ * the handlers for all G2H event-processing are called from the CT
+ * processing worker queue, and while that queue is being processed
+ * there is no absolute guarantee that we are not in the midst of a
+ * GT reset operation (which doesn't allow allocations).
+ */
+ struct list_head cachelist;
+#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS)
+#define PREALLOC_NODES_DEFAULT_NUMREGS 64
+ int max_mmio_per_node;
+
+ /**
+ * @outlist: List of parsed error-capture output nodes
+ *
+ * A linked list of parsed GuC error-capture output data before
+ * being formatted and reported via i915_gpu_coredump. Each node in
+ * this list contains a single engine-capture, including the global,
+ * engine-class and engine-instance register dumps, as per
+ * struct __guc_capture_parsed_output.
+ */
+ struct list_head outlist;
+};
+
+#endif /* _INTEL_GUC_CAPTURE_FWIF_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 447a976c9f25..2c4ad4a65089 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -9,8 +9,9 @@
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "intel_guc.h"
-#include "intel_guc_slpc.h"
#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -291,6 +292,41 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
flags |= GUC_WA_POLLCS;
+ /* Wa_16011759253:dg2_g10:a0 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+ flags |= GUC_WA_GAM_CREDITS;
+
+ /* Wa_14014475959:dg2 */
+ if (IS_DG2(gt->i915))
+ flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+
+ /*
+ * Wa_14012197797:dg2_g10:a0,dg2_g11:a0
+ * Wa_22011391025:dg2_g10,dg2_g11,dg2_g12
+ *
+ * The same WA bit is used for both, and 22011391025 is applicable to
+ * all DG2.
+ */
+ if (IS_DG2(gt->i915))
+ flags |= GUC_WA_DUAL_QUEUE;
+
+ /* Wa_22011802037: graphics version 12 */
+ if (GRAPHICS_VER(gt->i915) == 12)
+ flags |= GUC_WA_PRE_PARSER;
+
+ /* Wa_16011777198:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
+ flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
+
+ /*
+ * Wa_22012727170:dg2_g10[a0-c0), dg2_g11[a0..)
+ * Wa_22012727685:dg2_g11[a0..)
+ */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
+ flags |= GUC_WA_CONTEXT_ISOLATION;
+
return flags;
}
@@ -362,9 +398,14 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_fw;
- ret = intel_guc_ads_create(guc);
+ ret = intel_guc_capture_init(guc);
if (ret)
goto err_log;
+
+ ret = intel_guc_ads_create(guc);
+ if (ret)
+ goto err_capture;
+
GEM_BUG_ON(!guc->ads_vma);
ret = intel_guc_ct_init(&guc->ct);
@@ -403,6 +444,8 @@ err_ct:
intel_guc_ct_fini(&guc->ct);
err_ads:
intel_guc_ads_destroy(guc);
+err_capture:
+ intel_guc_capture_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
err_fw:
@@ -430,6 +473,7 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ct_fini(&guc->ct);
intel_guc_ads_destroy(guc);
+ intel_guc_capture_destroy(guc);
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2488d1197f3e..966e69a8b1c1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -10,18 +10,19 @@
#include <linux/iosys-map.h>
#include <linux/xarray.h>
-#include "intel_uncore.h"
+#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
-#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
+#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"
struct __guc_ads_blob;
+struct intel_guc_state_capture;
/**
* struct intel_guc - Top level structure of GuC.
@@ -38,6 +39,8 @@ struct intel_guc {
struct intel_guc_ct ct;
/** @slpc: sub-structure containing SLPC related data and objects */
struct intel_guc_slpc slpc;
+ /** @capture: the error-state-capture module's data and objects */
+ struct intel_guc_state_capture *capture;
/** @sched_engine: Global engine used to submit requests to GuC */
struct i915_sched_engine *sched_engine;
@@ -138,6 +141,8 @@ struct intel_guc {
bool submission_supported;
/** @submission_selected: tracks whether the user enabled GuC submission */
bool submission_selected;
+ /** @submission_initialized: tracks whether GuC submission has been initialised */
+ bool submission_initialized;
/**
* @rc_supported: tracks whether we support GuC rc on the current platform
*/
@@ -160,14 +165,11 @@ struct intel_guc {
struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_capture_size: size of register lists in the ADS used for error capture */
+ u32 ads_capture_size;
/** @ads_engine_usage_size: size of engine usage in the ADS */
u32 ads_engine_usage_size;
- /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
- struct i915_vma *lrc_desc_pool;
- /** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */
- void *lrc_desc_pool_vaddr;
-
/**
* @context_lookup: used to resolve intel_context from guc_id, if a
* context is present in this structure it is registered with the GuC
@@ -431,6 +433,9 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len);
+struct intel_engine_cs *
+intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);
+
void intel_guc_find_hung_context(struct intel_engine_cs *engine);
int intel_guc_global_policies_update(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 92cb88248391..3eabf4cf8eec 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -11,6 +11,7 @@
#include "gt/intel_lrc.h"
#include "gt/shmem_utils.h"
#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -86,8 +87,7 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
static u32 guc_ads_capture_size(struct intel_guc *guc)
{
- /* FIXME: Allocate a proper capture list */
- return PAGE_ALIGN(PAGE_SIZE);
+ return PAGE_ALIGN(guc->ads_capture_size);
}
static u32 guc_ads_private_data_size(struct intel_guc *guc)
@@ -276,15 +276,24 @@ __mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
return slot;
}
-static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
- u32 offset, u32 flags)
+#define GUC_REGSET_STEERING(group, instance) ( \
+ FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \
+ FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \
+ GUC_REGSET_NEEDS_STEERING \
+)
+
+static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
+ struct temp_regset *regset,
+ i915_reg_t reg, u32 flags)
{
u32 count = regset->storage_used - (regset->registers - regset->storage);
- struct guc_mmio_reg reg = {
+ u32 offset = i915_mmio_reg_offset(reg);
+ struct guc_mmio_reg entry = {
.offset = offset,
.flags = flags,
};
struct guc_mmio_reg *slot;
+ u8 group, inst;
/*
* The mmio list is built using separate lists within the driver.
@@ -292,11 +301,22 @@ static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
* register more than once. Do not consider this an error; silently
* move on if the register is already in the list.
*/
- if (bsearch(&reg, regset->registers, count,
- sizeof(reg), guc_mmio_reg_cmp))
+ if (bsearch(&entry, regset->registers, count,
+ sizeof(entry), guc_mmio_reg_cmp))
return 0;
- slot = __mmio_reg_add(regset, &reg);
+ /*
+ * The GuC doesn't have a default steering, so we need to explicitly
+ * steer all registers that need steering. However, we do not keep track
+ * of all the steering ranges, only of those that have a chance of using
+ * a non-default steering from the i915 pov. Instead of adding such
+ * tracking, it is easier to just program the default steering for all
+ * regs that don't need a non-default one.
+ */
+ intel_gt_get_valid_steering_for_reg(gt, reg, &group, &inst);
+ entry.flags |= GUC_REGSET_STEERING(group, inst);
+
+ slot = __mmio_reg_add(regset, &entry);
if (IS_ERR(slot))
return PTR_ERR(slot);
@@ -311,14 +331,16 @@ static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
return 0;
}
-#define GUC_MMIO_REG_ADD(regset, reg, masked) \
- guc_mmio_reg_add(regset, \
- i915_mmio_reg_offset((reg)), \
+#define GUC_MMIO_REG_ADD(gt, regset, reg, masked) \
+ guc_mmio_reg_add(gt, \
+ regset, \
+ (reg), \
(masked) ? GUC_REGSET_MASKED : 0)
static int guc_mmio_regset_init(struct temp_regset *regset,
struct intel_engine_cs *engine)
{
+ struct intel_gt *gt = engine->gt;
const u32 base = engine->mmio_base;
struct i915_wa_list *wal = &engine->wa_list;
struct i915_wa *wa;
@@ -331,26 +353,26 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
*/
regset->registers = regset->storage + regset->storage_used;
- ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
- ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
- ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_MODE_GEN7(base), true);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_HWS_PGA(base), false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_IMR(base), false);
- if (engine->class == RENDER_CLASS &&
+ if ((engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) &&
CCS_MASK(engine->gt))
- ret |= GUC_MMIO_REG_ADD(regset, GEN12_RCU_MODE, true);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg);
/* Be extra paranoid and include all whitelist registers. */
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
- ret |= GUC_MMIO_REG_ADD(regset,
+ ret |= GUC_MMIO_REG_ADD(gt, regset,
RING_FORCE_TO_NONPRIV(base, i),
false);
/* add in local MOCS registers */
for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
- ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
return ret ? -1 : 0;
}
@@ -433,7 +455,7 @@ static void guc_mmio_reg_state_init(struct intel_guc *guc)
static void fill_engine_enable_masks(struct intel_gt *gt,
struct iosys_map *info_map)
{
- info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], 1);
+ info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], 1);
info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
@@ -589,24 +611,119 @@ static void guc_init_golden_context(struct intel_guc *guc)
GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}
-static void guc_capture_list_init(struct intel_guc *guc)
+static int
+guc_capture_prep_lists(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
+ struct guc_gt_system_info local_info;
+ struct iosys_map info_map;
+ bool ads_is_mapped;
+ size_t size = 0;
+ void *ptr;
int i, j;
- u32 addr_ggtt, offset;
- offset = guc_ads_capture_offset(guc);
- addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+ ads_is_mapped = !iosys_map_is_null(&guc->ads_map);
+ if (ads_is_mapped) {
+ capture_offset = guc_ads_capture_offset(guc);
+ ads_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
+ offsetof(struct __guc_ads_blob, system_info));
+ } else {
+ memset(&local_info, 0, sizeof(local_info));
+ iosys_map_set_vaddr(&info_map, &local_info);
+ fill_engine_enable_masks(gt, &info_map);
+ }
- /* FIXME: Populate a proper capture list */
+ /* first, set aside the first page for a capture_list with zero descriptors */
+ total_size = PAGE_SIZE;
+ if (ads_is_mapped) {
+ if (!intel_guc_capture_getnullheader(guc, &ptr, &size))
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ null_ggtt = ads_ggtt + capture_offset;
+ capture_offset += PAGE_SIZE;
+ }
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
- ads_blob_write(guc, ads.capture_instance[i][j], addr_ggtt);
- ads_blob_write(guc, ads.capture_class[i][j], addr_ggtt);
- }
- ads_blob_write(guc, ads.capture_global[i], addr_ggtt);
+ /* null list if we don't have said engine or list */
+ if (!info_map_read(&info_map, engine_enabled_masks[j])) {
+ if (ads_is_mapped) {
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ }
+ continue;
+ }
+ if (intel_guc_capture_getlistsize(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ j, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ goto engine_instance_list;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ j, &ptr)) {
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_class[i][j], ads_ggtt +
+ capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
+engine_instance_list:
+ if (intel_guc_capture_getlistsize(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ j, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ continue;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ j, &ptr)) {
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_instance[i][j], ads_ggtt +
+ capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
+ }
+ if (intel_guc_capture_getlistsize(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_global[i], null_ggtt);
+ continue;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0,
+ &ptr)) {
+ ads_blob_write(guc, ads.capture_global[i], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_global[i], ads_ggtt + capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
}
+
+ if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
+ drm_warn(&i915->drm, "GuC->ADS->Capture alloc size changed from %d to %d\n",
+ guc->ads_capture_size, PAGE_ALIGN(total_size));
+
+ return PAGE_ALIGN(total_size);
}
static void __guc_ads_init(struct intel_guc *guc)
@@ -644,8 +761,8 @@ static void __guc_ads_init(struct intel_guc *guc)
base = intel_guc_ggtt_offset(guc, guc->ads_vma);
- /* Capture list for hang debug */
- guc_capture_list_init(guc);
+ /* Lists for error capture debug */
+ guc_capture_prep_lists(guc);
/* ADS */
ads_blob_write(guc, ads.scheduler_policies, base +
@@ -693,6 +810,12 @@ int intel_guc_ads_create(struct intel_guc *guc)
return ret;
guc->ads_golden_ctxt_size = ret;
+ /* Likewise the capture lists: */
+ ret = guc_capture_prep_lists(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_capture_size = ret;
+
/* Now the total size can be determined: */
size = guc_ads_blob_size(guc);
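Note the two-pass design of guc_capture_prep_lists(): with the ADS not yet mapped it only accumulates and returns the worst-case size, and once the ADS is mapped it additionally copies the lists in and writes the GGTT pointers. A sketch of the resulting call pattern from the two call sites above:

/* pass 1, from intel_guc_ads_create(): ads_map is still unset,
 * so only the required size is computed */
guc->ads_capture_size = guc_capture_prep_lists(guc);

/* pass 2, from __guc_ads_init(): the ADS is now mapped, so the
 * lists are copied in and the capture pointers are fixed up */
guc_capture_prep_lists(guc);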
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
new file mode 100644
index 000000000000..c4e25966d3e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -0,0 +1,1657 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_lrc.h"
+#include "guc_capture_fwif.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_fwif.h"
+#include "i915_drv.h"
+#include "i915_gpu_error.h"
+#include "i915_irq.h"
+#include "i915_memcpy.h"
+#include "i915_reg.h"
+
+/*
+ * Define all device tables of GuC error capture register lists
+ * NOTE: For engine-registers, GuC only needs the register offsets
+ * from the engine-mmio-base
+ */
+#define COMMON_BASE_GLOBAL \
+ { FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
+
+#define COMMON_GEN9BASE_GLOBAL \
+ { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
+ { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \
+ { ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
+ { DONE_REG, 0, 0, "DONE_REG" }, \
+ { HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
+
+#define COMMON_GEN12BASE_GLOBAL \
+ { GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
+ { GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
+ { GEN12_AUX_ERR_DBG, 0, 0, "AUX_ERR_DBG" }, \
+ { GEN12_GAM_DONE, 0, 0, "GAM_DONE" }, \
+ { GEN12_RING_FAULT_REG, 0, 0, "FAULT_REG" }
+
+#define COMMON_BASE_ENGINE_INSTANCE \
+ { RING_PSMI_CTL(0), 0, 0, "RC PSMI" }, \
+ { RING_ESR(0), 0, 0, "ESR" }, \
+ { RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
+ { RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
+ { RING_IPEIR(0), 0, 0, "IPEIR" }, \
+ { RING_IPEHR(0), 0, 0, "IPEHR" }, \
+ { RING_INSTPS(0), 0, 0, "INSTPS" }, \
+ { RING_BBADDR(0), 0, 0, "RING_BBADDR_LOW32" }, \
+ { RING_BBADDR_UDW(0), 0, 0, "RING_BBADDR_UP32" }, \
+ { RING_BBSTATE(0), 0, 0, "BB_STATE" }, \
+ { CCID(0), 0, 0, "CCID" }, \
+ { RING_ACTHD(0), 0, 0, "ACTHD_LDW" }, \
+ { RING_ACTHD_UDW(0), 0, 0, "ACTHD_UDW" }, \
+ { RING_INSTPM(0), 0, 0, "INSTPM" }, \
+ { RING_INSTDONE(0), 0, 0, "INSTDONE" }, \
+ { RING_NOPID(0), 0, 0, "RING_NOPID" }, \
+ { RING_START(0), 0, 0, "START" }, \
+ { RING_HEAD(0), 0, 0, "HEAD" }, \
+ { RING_TAIL(0), 0, 0, "TAIL" }, \
+ { RING_CTL(0), 0, 0, "CTL" }, \
+ { RING_MI_MODE(0), 0, 0, "MODE" }, \
+ { RING_CONTEXT_CONTROL(0), 0, 0, "RING_CONTEXT_CONTROL" }, \
+ { RING_HWS_PGA(0), 0, 0, "HWS" }, \
+ { RING_MODE_GEN7(0), 0, 0, "GFX_MODE" }, \
+ { GEN8_RING_PDP_LDW(0, 0), 0, 0, "PDP0_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 0), 0, 0, "PDP0_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 1), 0, 0, "PDP1_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 1), 0, 0, "PDP1_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 2), 0, 0, "PDP2_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 2), 0, 0, "PDP2_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
+
+#define COMMON_BASE_HAS_EU \
+ { EIR, 0, 0, "EIR" }
+
+#define COMMON_BASE_RENDER \
+ { GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
+
+#define COMMON_GEN12BASE_RENDER \
+ { GEN12_SC_INSTDONE_EXTRA, 0, 0, "GEN12_SC_INSTDONE_EXTRA" }, \
+ { GEN12_SC_INSTDONE_EXTRA2, 0, 0, "GEN12_SC_INSTDONE_EXTRA2" }
+
+#define COMMON_GEN12BASE_VEC \
+ { GEN12_SFC_DONE(0), 0, 0, "SFC_DONE[0]" }, \
+ { GEN12_SFC_DONE(1), 0, 0, "SFC_DONE[1]" }, \
+ { GEN12_SFC_DONE(2), 0, 0, "SFC_DONE[2]" }, \
+ { GEN12_SFC_DONE(3), 0, 0, "SFC_DONE[3]" }
+
+/* XE_LPD - Global */
+static const struct __guc_mmio_reg_descr xe_lpd_global_regs[] = {
+ COMMON_BASE_GLOBAL,
+ COMMON_GEN9BASE_GLOBAL,
+ COMMON_GEN12BASE_GLOBAL,
+};
+
+/* XE_LPD - Render / Compute Per-Class */
+static const struct __guc_mmio_reg_descr xe_lpd_rc_class_regs[] = {
+ COMMON_BASE_HAS_EU,
+ COMMON_BASE_RENDER,
+ COMMON_GEN12BASE_RENDER,
+};
+
+/* GEN9/XE_LPD - Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_rc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9/XE_LPD - Media Decode/Encode Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_vd_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* XE_LPD - Video Enhancement Per-Class */
+static const struct __guc_mmio_reg_descr xe_lpd_vec_class_regs[] = {
+ COMMON_GEN12BASE_VEC,
+};
+
+/* GEN9/XE_LPD - Video Enhancement Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_vec_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9/XE_LPD - Blitter Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_blt_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9 - Global */
+static const struct __guc_mmio_reg_descr default_global_regs[] = {
+ COMMON_BASE_GLOBAL,
+ COMMON_GEN9BASE_GLOBAL,
+};
+
+static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {
+ COMMON_BASE_HAS_EU,
+ COMMON_BASE_RENDER,
+};
+
+/*
+ * Empty lists:
+ * GEN9/XE_LPD - Blitter Per-Class
+ * GEN9/XE_LPD - Media Decode/Encode Per-Class
+ * GEN9 - VEC Class
+ */
+static const struct __guc_mmio_reg_descr empty_regs_list[] = {
+};
+
+#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
+#define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x)
+#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
+ { \
+ regslist, \
+ ARRAY_SIZE(regslist), \
+ TO_GCAP_DEF_OWNER(regsowner), \
+ TO_GCAP_DEF_TYPE(regstype), \
+ class, \
+ NULL, \
+ }
+
+/* List of lists */
+static struct __guc_mmio_reg_descr_group default_lists[] = {
+ MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
+ MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
+ {}
+};
+
+static const struct __guc_mmio_reg_descr_group xe_lpd_lists[] = {
+ MAKE_REGLIST(xe_lpd_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_class_regs, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
+ MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
+ {}
+};
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, u32 id)
+{
+ int i;
+
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].list; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
+ return &reglists[i];
+ }
+
+ return NULL;
+}
+
+static struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, u32 id)
+{
+ int i;
+
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].extlist; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
+ return &reglists[i];
+ }
+
+ return NULL;
+}
+
+static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists)
+{
+ int i = 0;
+
+ if (!reglists)
+ return;
+
+ while (reglists[i].extlist)
+ kfree(reglists[i++].extlist);
+}
+
+struct __ext_steer_reg {
+ const char *name;
+ i915_reg_t reg;
+};
+
+static const struct __ext_steer_reg xe_extregs[] = {
+ {"GEN7_SAMPLER_INSTDONE", GEN7_SAMPLER_INSTDONE},
+ {"GEN7_ROW_INSTDONE", GEN7_ROW_INSTDONE}
+};
+
+static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
+ const struct __ext_steer_reg *extlist,
+ int slice_id, int subslice_id)
+{
+ ext->reg = extlist->reg;
+ ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->regname = extlist->name;
+}
+
+static int
+__alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
+ const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
+{
+ struct __guc_mmio_reg_descr *list;
+
+ list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ newlist->extlist = list;
+ newlist->num_regs = num_regs;
+ newlist->owner = rootlist->owner;
+ newlist->engine = rootlist->engine;
+ newlist->type = rootlist->type;
+
+ return 0;
+}
+
+static void
+guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
+ const struct __guc_mmio_reg_descr_group *lists)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int slice, subslice, i, num_steer_regs, num_tot_regs = 0;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+ struct sseu_dev_info *sseu;
+
+ /* In XE_LPD we only have steered registers for the render-class */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
+ /* skip if extlists was previously allocated */
+ if (!list || guc->capture->extlists)
+ return;
+
+ num_steer_regs = ARRAY_SIZE(xe_extregs);
+
+ sseu = &gt->info.sseu;
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
+ num_tot_regs += num_steer_regs;
+
+ if (!num_tot_regs)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
+ kfree(extlists);
+ return;
+ }
+
+ extarray = extlists[0].extlist;
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
+ for (i = 0; i < num_steer_regs; ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ }
+
+ guc->capture->extlists = extlists;
+}
+
+static const struct __ext_steer_reg xehpg_extregs[] = {
+ {"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
+};
+
+static bool __has_xehpg_extregs(u32 ipver)
+{
+ return (ipver >= IP_VER(12, 55));
+}
+
+static void
+guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
+ const struct __guc_mmio_reg_descr_group *lists,
+ u32 ipver)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct sseu_dev_info *sseu;
+ int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+
+ /* In XE_LP / HPG we only have render-class steering registers during error-capture */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
+ /* skip if extlists was previously allocated */
+ if (!list || guc->capture->extlists)
+ return;
+
+ num_steer_regs = ARRAY_SIZE(xe_extregs);
+ if (__has_xehpg_extregs(ipver))
+ num_steer_regs += ARRAY_SIZE(xehpg_extregs);
+
+ sseu = &gt->info.sseu;
+ for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
+ num_tot_regs += num_steer_regs;
+ }
+
+ if (!num_tot_regs)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
+ kfree(extlists);
+ return;
+ }
+
+ extarray = extlists[0].extlist;
+ for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
+ for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ if (__has_xehpg_extregs(ipver)) {
+ for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
+ __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ }
+ }
+
+ drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs);
+ guc->capture->extlists = extlists;
+}
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_device_reglist(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ if (GRAPHICS_VER(i915) > 11) {
+ /*
+ * For certain engine classes, there are slice and subslice
+ * level registers requiring steering. We allocate and populate
+ * these at init time based on hw config and add them as an extension
+ * list at the end of the pre-populated render list.
+ */
+ if (IS_DG2(i915))
+ guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 55));
+ else if (IS_XEHPSDV(i915))
+ guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 50));
+ else
+ guc_capture_alloc_steered_lists_xe_lpd(guc, xe_lpd_lists);
+
+ return xe_lpd_lists;
+ }
+
+ /* if GuC submission is enabled on a non-POR platform, just use a common baseline */
+ return default_lists;
+}
+
+static const char *
+__stringify_owner(u32 owner)
+{
+ switch (owner) {
+ case GUC_CAPTURE_LIST_INDEX_PF:
+ return "PF";
+ case GUC_CAPTURE_LIST_INDEX_VF:
+ return "VF";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+__stringify_type(u32 type)
+{
+ switch (type) {
+ case GUC_CAPTURE_LIST_TYPE_GLOBAL:
+ return "Global";
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ return "Class";
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ return "Instance";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+__stringify_engclass(u32 class)
+{
+ switch (class) {
+ case GUC_RENDER_CLASS:
+ return "Render";
+ case GUC_VIDEO_CLASS:
+ return "Video";
+ case GUC_VIDEOENHANCE_CLASS:
+ return "VideoEnhance";
+ case GUC_BLITTER_CLASS:
+ return "Blitter";
+ case GUC_COMPUTE_CLASS:
+ return "Compute";
+ default:
+ return "unknown";
+ }
+}
+
+static void
+guc_capture_warn_with_list_info(struct drm_i915_private *i915, char *msg,
+ u32 owner, u32 type, u32 classid)
+{
+ if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
+ drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers.\n", msg,
+ __stringify_owner(owner), __stringify_type(type));
+ else
+ drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers on %s-Engine\n", msg,
+ __stringify_owner(owner), __stringify_type(type),
+ __stringify_engclass(classid));
+}
+
+static int
+guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ struct guc_mmio_reg *ptr, u16 num_entries)
+{
+ u32 i = 0, j = 0;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+
+ if (!reglists)
+ return -ENODEV;
+
+ match = guc_capture_get_one_list(reglists, owner, type, classid);
+ if (!match) {
+ guc_capture_warn_with_list_info(i915, "Missing register list init", owner, type,
+ classid);
+ return -ENODATA;
+ }
+
+ for (i = 0; i < num_entries && i < match->num_regs; ++i) {
+ ptr[i].offset = match->list[i].reg.reg;
+ ptr[i].value = 0xDEADF00D;
+ ptr[i].flags = match->list[i].flags;
+ ptr[i].mask = match->list[i].mask;
+ }
+
+ matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid);
+ if (matchext) {
+ for (i = match->num_regs, j = 0; i < num_entries &&
+ i < (match->num_regs + matchext->num_regs) &&
+ j < matchext->num_regs; ++i, ++j) {
+ ptr[i].offset = matchext->extlist[j].reg.reg;
+ ptr[i].value = 0xDEADF00D;
+ ptr[i].flags = matchext->extlist[j].flags;
+ ptr[i].mask = matchext->extlist[j].mask;
+ }
+ }
+ if (i < num_entries)
+ drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n",
+ (int)i, (int)num_entries);
+
+ return 0;
+}
+
+static int
+guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid)
+{
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+ int num_regs;
+
+ match = guc_capture_get_one_list(gc->reglists, owner, type, classid);
+ if (!match)
+ return 0;
+
+ num_regs = match->num_regs;
+
+ matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid);
+ if (matchext)
+ num_regs += matchext->num_regs;
+
+ return num_regs;
+}
+
+int
+intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
+ int num_regs;
+
+ if (!gc->reglists)
+ return -ENODEV;
+
+ if (cache->is_valid) {
+ *size = cache->size;
+ return cache->status;
+ }
+
+ num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
+ if (!num_regs) {
+ guc_capture_warn_with_list_info(i915, "Missing register list size",
+ owner, type, classid);
+ return -ENODATA;
+ }
+
+ *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+ (num_regs * sizeof(struct guc_mmio_reg)));
+
+ return 0;
+}
+
+static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
+
+int
+intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ void **outptr)
+{
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_debug_capture_list *listnode;
+ int ret, num_regs;
+ u8 *caplist, *tmp;
+ size_t size = 0;
+
+ if (!gc->reglists)
+ return -ENODEV;
+
+ if (cache->is_valid) {
+ *outptr = cache->ptr;
+ return cache->status;
+ }
+
+ /*
+ * ADS population of input registers is a good
+ * time to pre-allocate cachelist output nodes
+ */
+ guc_capture_create_prealloc_nodes(guc);
+
+ ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size);
+ if (ret) {
+ cache->is_valid = true;
+ cache->ptr = NULL;
+ cache->size = 0;
+ cache->status = ret;
+ return ret;
+ }
+
+ caplist = kzalloc(size, GFP_KERNEL);
+ if (!caplist) {
+ drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist");
+ return -ENOMEM;
+ }
+
+ /* populate capture list header */
+ tmp = caplist;
+ num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid);
+ listnode = (struct guc_debug_capture_list *)tmp;
+ listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);
+
+ /* populate list of register descriptor */
+ tmp += sizeof(struct guc_debug_capture_list);
+ guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs);
+
+ /* cache this list */
+ cache->is_valid = true;
+ cache->ptr = caplist;
+ cache->size = size;
+ cache->status = 0;
+
+ *outptr = caplist;
+
+ return 0;
+}
+
+int
+intel_guc_capture_getnullheader(struct intel_guc *guc,
+ void **outptr, size_t *size)
+{
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int tmp = sizeof(u32) * 4;
+ void *null_header;
+
+ if (gc->ads_null_cache) {
+ *outptr = gc->ads_null_cache;
+ *size = tmp;
+ return 0;
+ }
+
+ null_header = kzalloc(tmp, GFP_KERNEL);
+ if (!null_header) {
+ drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist");
+ return -ENOMEM;
+ }
+
+ gc->ads_null_cache = null_header;
+ *outptr = null_header;
+ *size = tmp;
+
+ return 0;
+}
+
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+int
+intel_guc_capture_output_min_size_est(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int worst_min_size = 0, num_regs = 0;
+ size_t tmp = 0;
+
+ if (!guc->capture)
+ return -ENODEV;
+
+ /*
+ * If every single engine-instance suffered a failure in quick succession, and
+ * the failures were all unrelated, then a burst of multiple error-capture events
+ * would dump registers for each engine instance, one at a time. In this case,
+ * GuC would even dump the global registers repeatedly.
+ *
+ * For each engine instance, there would be 1 x guc_state_capture_group_t output
+ * followed by 3 x guc_state_capture_t lists. The latter is how the register
+ * dumps are split across the different register types (where the '3' is global
+ * vs class vs instance). Finally, multiply the whole thing by 3x (so we are not
+ * limited to just one round of data in a worst-case full register dump log).
+ *
+ * NOTE: intel_guc_log that allocates the log buffer would round this size up to
+ * a power of two.
+ */
+
+ for_each_engine(engine, gt, id) {
+ worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
+ (3 * sizeof(struct guc_state_capture_header_t));
+
+ if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp))
+ num_regs += tmp;
+
+ if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ engine->class, &tmp)) {
+ num_regs += tmp;
+ }
+ if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ engine->class, &tmp)) {
+ num_regs += tmp;
+ }
+ }
+
+ worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
+
+ return (worst_min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER);
+}
+
+/*
+ * KMD Init time flows:
+ * --------------------
+ * --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
+ * intel_guc_ads acquires the register lists by calling
+ * intel_guc_capture_getlistsize and intel_guc_capture_getlist 'n' times,
+ * where n = 1 for global-reg-list +
+ * num_engine_classes for class-reg-list +
+ * num_engine_classes for instance-reg-list
+ * (since all instances of the same engine-class type
+ * have an identical engine-instance register-list).
+ * ADS module also calls separately for PF vs VF.
+ *
+ * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
+ * Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
+ * Note: 'x 3' to hold multiple capture groups
+ *
+ * GUC Runtime notify capture:
+ * --------------------------
+ * --> G2H STATE_CAPTURE_NOTIFICATION
+ * L--> intel_guc_capture_process
+ * L--> Loop through B (head..tail) and for each engine instance's
+ * err-state-captured register-list we find, we alloc 'C':
+ * --> alloc C: A capture-output-node structure that includes misc capture info along
+ * with 3 register list dumps (global, engine-class and engine-instance)
+ * This node is created from a pre-allocated list of blank nodes in
+ * guc->capture->cachelist and populated with the error-capture
+ * data from GuC and then added into the guc->capture->outlist linked
+ * list. This list is used for matchup and printout by i915_gpu_coredump
+ * and err_print_gt (when the user invokes the error-capture sysfs).
+ *
+ * GUC --> notify context reset:
+ * -----------------------------
+ * --> G2H CONTEXT RESET
+ * L--> guc_handle_context_reset --> i915_capture_error_state
+ * L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines
+ * --> capture_engine(..IS_GUC_CAPTURE)
+ * L--> intel_guc_capture_get_matching_node is where
+ * we detach C from the internal linked list and add it
+ * into the intel_engine_coredump struct (if the context
+ * and engine of the event notification match a node in
+ * the linked list).
+ *
+ * User Sysfs / Debugfs
+ * --------------------
+ * --> i915_gpu_coredump_copy_to_buffer->
+ * L--> err_print_to_sgl --> err_print_gt
+ * L--> error_print_guc_captures
+ * L--> intel_guc_capture_print_node prints the
+ * register-list values of the attached node
+ * as part of the error-engine-dump being reported.
+ * L--> i915_reset_error_state ... -->__i915_gpu_coredump_free
+ * L--> ... cleanup_gt -->
+ * L--> intel_guc_capture_free_node returns the
+ * capture-output-node back to the internal
+ * cachelist for reuse.
+ *
+ */
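Tying the runtime flow above to the declaration added to intel_guc.h in this patch, a hedged sketch of the G2H entry point (payload validation elided; the real handler may differ):

int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len)
{
	/* ... validate the G2H payload ... */

	/* drain the error-capture ring (alloc B) into output nodes (C) */
	intel_guc_capture_process(guc);

	return 0;
}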
+
+static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
+{
+ if (buf->wr >= buf->rd)
+ return (buf->wr - buf->rd);
+ return (buf->size - buf->rd) + buf->wr;
+}
+
+static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
+{
+ if (buf->rd > buf->wr)
+ return (buf->size - buf->rd);
+ return (buf->wr - buf->rd);
+}
+
+/*
+ * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
+ *
+ * The GuC Log buffer region for error-capture is managed like a ring buffer.
+ * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
+ * Additionally, both now and for the foreseeable future, all packed error-
+ * capture output structures are dword aligned.
+ *
+ * That said, if the GuC firmware is in the midst of writing a structure that is
+ * larger than one dword but the tail end of the err-capture buffer-region has
+ * less space left, we would need to extract that structure one dword at a time,
+ * straddled across the end, onto the start of the ring.
+ *
+ * The function below, guc_capture_log_remove_dw, is a helper for that. Callers
+ * typically do a straight-up memcpy from the ring contents and only call this
+ * helper when their structure-extraction straddles the end of the
+ * ring. GuC firmware does not add any padding. The reason for the no-padding is to ease
+ * scalability for future expansion of output data types without requiring a redesign
+ * of the flow controls.
+ */
+static int
+guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ u32 *dw)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int tries = 2;
+ int avail = 0;
+ u32 *src_data;
+
+ if (!guc_capture_buf_cnt(buf))
+ return 0;
+
+ while (tries--) {
+ avail = guc_capture_buf_cnt_to_end(buf);
+ if (avail >= sizeof(u32)) {
+ src_data = (u32 *)(buf->data + buf->rd);
+ *dw = *src_data;
+ buf->rd += 4;
+ return 4;
+ }
+ if (avail)
+ drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n");
+ buf->rd = 0;
+ }
+
+ return 0;
+}
+
+static bool
+guc_capture_data_extracted(struct __guc_capture_bufstate *b,
+ int size, void *dest)
+{
+ if (guc_capture_buf_cnt_to_end(b) >= size) {
+ memcpy(dest, (b->data + b->rd), size);
+ b->rd += size;
+ return true;
+ }
+ return false;
+}
+
+static int
+guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_group_header_t *ghdr)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_state_capture_group_header_t);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner);
+ read += guc_capture_log_remove_dw(guc, buf, &ghdr->info);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static int
+guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_header_t *hdr)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_state_capture_header_t);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)hdr))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->owner);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->info);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static int
+guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_mmio_reg *reg)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_mmio_reg);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)reg))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &reg->offset);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->value);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->flags);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->mask);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static void
+guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
+{
+ int i;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
+ kfree(node->reginfo[i].regs);
+ list_del(&node->link);
+ kfree(node);
+}
+
+static void
+guc_capture_delete_prealloc_nodes(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /*
+ * NOTE: At the end of driver operation, we must assume that we
+ * have prealloc nodes in both the cachelist as well as outlist
+ * if unclaimed error capture events occurred prior to shutdown.
+ */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link)
+ guc_capture_delete_one_node(guc, n);
+
+ list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link)
+ guc_capture_delete_one_node(guc, n);
+}
+
+static void
+guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
+ struct list_head *list)
+{
+ list_add_tail(&node->link, list);
+}
+
+static void
+guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_add_node_to_list(node, &gc->outlist);
+}
+
+static void
+guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_add_node_to_list(node, &gc->cachelist);
+}
+
+static void
+guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
+{
+ struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX];
+ int i;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ tmp[i] = node->reginfo[i].regs;
+ memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
+ guc->capture->max_mmio_per_node);
+ }
+ memset(node, 0, sizeof(*node));
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
+ node->reginfo[i].regs = tmp[i];
+
+ INIT_LIST_HEAD(&node->link);
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_get_prealloc_node(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *found = NULL;
+
+ if (!list_empty(&guc->capture->cachelist)) {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /* get first avail node from the cache list */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
+ found = n;
+ list_del(&n->link);
+ break;
+ }
+ } else {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /* traverse down and steal back the oldest node already allocated */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+ found = n;
+ }
+ if (found)
+ list_del(&found->link);
+ }
+ if (found)
+ guc_capture_init_node(guc, found);
+
+ return found;
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_alloc_one_node(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node,
+ sizeof(struct guc_mmio_reg), GFP_KERNEL);
+ if (!new->reginfo[i].regs) {
+ while (i)
+ kfree(new->reginfo[--i].regs);
+ kfree(new);
+ return NULL;
+ }
+ }
+ guc_capture_init_node(guc, new);
+
+ return new;
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original,
+ u32 keep_reglist_mask)
+{
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = guc_capture_get_prealloc_node(guc);
+ if (!new)
+ return NULL;
+ if (!original)
+ return new;
+
+ new->is_partial = original->is_partial;
+
+ /* copy reg-lists that we want to clone */
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ if (keep_reglist_mask & BIT(i)) {
+ GEM_BUG_ON(original->reginfo[i].num_regs >
+ guc->capture->max_mmio_per_node);
+
+ memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
+ original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));
+
+ new->reginfo[i].num_regs = original->reginfo[i].num_regs;
+ new->reginfo[i].vfid = original->reginfo[i].vfid;
+
+ if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) {
+ new->eng_class = original->eng_class;
+ } else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
+ new->eng_inst = original->eng_inst;
+ new->guc_id = original->guc_id;
+ new->lrca = original->lrca;
+ }
+ }
+ }
+
+ return new;
+}
+
+static void
+__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *node = NULL;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int i;
+
+ for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
+ node = guc_capture_alloc_one_node(guc);
+ if (!node) {
+ drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n");
+ /* don't free the priors, use what we got and clean up at shutdown */
+ return;
+ }
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+}
+
+static int
+guc_get_max_reglist_count(struct intel_guc *guc)
+{
+ int i, j, k, tmp, maxregcount = 0;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+ for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
+ for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
+ if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0)
+ continue;
+
+ tmp = guc_cap_list_num_regs(guc->capture, i, j, k);
+ if (tmp > maxregcount)
+ maxregcount = tmp;
+ }
+ }
+ }
+ if (!maxregcount)
+ maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
+
+ return maxregcount;
+}
+
+static void
+guc_capture_create_prealloc_nodes(struct intel_guc *guc)
+{
+ /* skip if we've already done the pre-alloc */
+ if (guc->capture->max_mmio_per_node)
+ return;
+
+ guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
+ __guc_capture_create_prealloc_nodes(guc);
+}
+
+static int
+guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_state_capture_group_header_t ghdr = {0};
+ struct guc_state_capture_header_t hdr = {0};
+ struct __guc_capture_parsed_output *node = NULL;
+ struct guc_mmio_reg *regs = NULL;
+ int i, numlists, numregs, ret = 0;
+ enum guc_capture_type datatype;
+ struct guc_mmio_reg tmp;
+ bool is_partial = false;
+
+ i = guc_capture_buf_cnt(buf);
+ if (!i)
+ return -ENODATA;
+ if (i % sizeof(u32)) {
+ drm_warn(&i915->drm, "GuC Capture new entries unaligned\n");
+ ret = -EIO;
+ goto bailout;
+ }
+
+ /* first get the capture group header */
+ if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
+ ret = -EIO;
+ goto bailout;
+ }
+ /*
+ * We would typically expect a layout as below, where the number of captures
+ * is at least 3 (global + class + instance) and grows beyond that when
+ * multiple dependent engine instances are being reset together.
+ * ____________________________________________
+ * | Capture Group |
+ * | ________________________________________ |
+ * | | Capture Group Header: | |
+ * | | - num_captures = 5 | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture1: | |
+ * | | Hdr: GLOBAL, numregs=a | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rega | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture2: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regb | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture3: | |
+ * | | Hdr: INSTANCE=RCS, numregs=c | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regc | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture4: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regd | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture5: | |
+ * | | Hdr: INSTANCE=CCS0, numregs=e | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rege | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * |__________________________________________|
+ */
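+ /* full vs partial coverage is encoded in the group header's capture-type field */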
+ is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info);
+ numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info);
+
+ while (numlists--) {
+ if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
+ ret = -EIO;
+ break;
+ }
+
+ datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info);
+ if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
+ /* unknown capture type - skip over to next capture set */
+ numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &tmp)) {
+ ret = -EIO;
+ break;
+ }
+ }
+ continue;
+ } else if (node) {
+ /*
+ * Based on the current capture type and what we have so far,
+ * decide whether to add the current node to the internal
+ * linked list for match-up when i915_gpu_coredump calls later
+ * (and allocate a blank node for the next set of reglists),
+ * continue filling the same node, or clone the current node
+ * but retain only the global or class registers (as in the
+ * case of dependent engine resets).
+ */
+ if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS &&
+ node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) {
+ /* Add to list, clone node and duplicate global list */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ GCAP_PARSED_REGLIST_INDEX_GLOBAL);
+ } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE &&
+ node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) {
+ /* Add to list, clone node and duplicate global + class lists */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ (GCAP_PARSED_REGLIST_INDEX_GLOBAL |
+ GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
+ }
+ }
+
+ if (!node) {
+ node = guc_capture_get_prealloc_node(guc);
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
+ drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n",
+ datatype);
+ }
+ node->is_partial = is_partial;
+ node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner);
+ switch (datatype) {
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
+ node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
+ node->lrca = hdr.lrca;
+ node->guc_id = hdr.guc_id;
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
+ break;
+ default:
+ break;
+ }
+
+ numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
+ if (numregs > guc->capture->max_mmio_per_node) {
+ drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n");
+ numregs = guc->capture->max_mmio_per_node;
+ }
+ node->reginfo[datatype].num_regs = numregs;
+ regs = node->reginfo[datatype].regs;
+ i = 0;
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &regs[i++])) {
+ ret = -EIO;
+ break;
+ }
+ }
+ }
+
+bailout:
+ if (node) {
+ /* If we have data, add to linked list for match-up when i915_gpu_coredump calls */
+ for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ if (node->reginfo[i].regs) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ break;
+ }
+ }
+ if (node) /* else return it back to cache list */
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+ return ret;
+}
+
+static int __guc_capture_flushlog_complete(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
+ GUC_CAPTURE_LOG_BUFFER
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static void __guc_capture_process_output(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, full_count;
+ struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_log_buffer_state log_buf_state_local;
+ struct guc_log_buffer_state *log_buf_state;
+ struct __guc_capture_bufstate buf;
+ void *src_data = NULL;
+ bool new_overflow;
+ int ret;
+
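+ /*
+ * The guc_log_buffer_state structs for all buffer types sit
+ * back-to-back at the start of the log BO; index by buffer type.
+ */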
+ log_buf_state = guc->log.buf_addr +
+ (sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
+ src_data = guc->log.buf_addr + intel_guc_get_log_buffer_offset(GUC_CAPTURE_LOG_BUFFER);
+
+ /*
+ * Make a stack copy of the state structure that lives inside the
+ * GuC log buffer (which is mapped uncached), to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
+ buffer_size = intel_guc_get_log_buffer_size(GUC_CAPTURE_LOG_BUFFER);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_count = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file;
+ new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER,
+ full_count);
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ drm_err(&i915->drm, "invalid GuC log capture buffer state!\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ buf.size = buffer_size;
+ buf.rd = read_offset;
+ buf.wr = write_offset;
+ buf.data = src_data;
+
+ if (!uc->reset_in_progress) {
+ do {
+ ret = guc_capture_extract_reglists(guc, &buf);
+ } while (ret >= 0);
+ }
+
+ /* Update the err-cap log buffer state */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ __guc_capture_flushlog_complete(guc);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+static const char *
+guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
+ u32 class, u32 id, u32 offset, u32 *is_ext)
+{
+ const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+ int j;
+
+ *is_ext = 0;
+ if (!reglists)
+ return NULL;
+
+ match = guc_capture_get_one_list(reglists, owner, type, id);
+ if (!match)
+ return NULL;
+
+ for (j = 0; j < match->num_regs; ++j) {
+ if (offset == match->list[j].reg.reg)
+ return match->list[j].regname;
+ }
+ if (extlists) {
+ matchext = guc_capture_get_one_ext_list(extlists, owner, type, id);
+ if (!matchext)
+ return NULL;
+ for (j = 0; j < matchext->num_regs; ++j) {
+ if (offset == matchext->extlist[j].reg.reg) {
+ *is_ext = 1;
+ return matchext->extlist[j].regname;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define __out(a, ...) \
+ do { \
+ drm_warn((&(a)->i915->drm), __VA_ARGS__); \
+ i915_error_printf((a), __VA_ARGS__); \
+ } while (0)
+#else
+#define __out(a, ...) \
+ i915_error_printf(a, __VA_ARGS__)
+#endif
+
+#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
+ do { \
+ __out(ebuf, " i915-Eng-Name: %s command stream\n", \
+ (eng)->name); \
+ __out(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
+ __out(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
+ __out(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
+ (eng)->logical_mask); \
+ } while (0)
+
+#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
+ do { \
+ __out(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
+ (node)->eng_inst); \
+ __out(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
+ __out(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
+ } while (0)
+
+int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
+ const struct intel_engine_coredump *ee)
+{
+ const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
+ "full-capture",
+ "partial-capture"
+ };
+ const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = {
+ "Global",
+ "Engine-Class",
+ "Engine-Instance"
+ };
+ struct intel_guc_state_capture *cap;
+ struct __guc_capture_parsed_output *node;
+ struct intel_engine_cs *eng;
+ struct guc_mmio_reg *regs;
+ struct intel_guc *guc;
+ const char *str;
+ int numregs, i, j;
+ u32 is_ext;
+
+ if (!ebuf || !ee)
+ return -EINVAL;
+ cap = ee->capture;
+ if (!cap || !ee->engine)
+ return -ENODEV;
+
+ guc = &ee->engine->gt->uc.guc;
+
+ __out(ebuf, "global --- GuC Error Capture on %s command stream:\n",
+ ee->engine->name);
+
+ node = ee->guc_capture_node;
+ if (!node) {
+ __out(ebuf, " No matching ee-node\n");
+ return 0;
+ }
+
+ __out(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
+
+ for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ __out(ebuf, " RegListType: %s\n",
+ datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
+ __out(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
+
+ switch (i) {
+ case GUC_CAPTURE_LIST_TYPE_GLOBAL:
+ default:
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ __out(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
+ __out(ebuf, " i915-Eng-Class: %d\n",
+ guc_class_to_engine_class(node->eng_class));
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
+ if (eng)
+ GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
+ else
+ __out(ebuf, " i915-Eng-Lookup Fail!\n");
+ GCAP_PRINT_GUC_INST_INFO(ebuf, node);
+ break;
+ }
+
+ numregs = node->reginfo[i].num_regs;
+ __out(ebuf, " NumRegs: %d\n", numregs);
+ j = 0;
+ while (numregs--) {
+ regs = node->reginfo[i].regs;
+ str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
+ node->eng_class, 0, regs[j].offset, &is_ext);
+ if (!str)
+ __out(ebuf, " REG-0x%08x", regs[j].offset);
+ else
+ __out(ebuf, " %s", str);
+ if (is_ext)
+ __out(ebuf, "[%ld][%ld]",
+ FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
+ FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
+ __out(ebuf, ": 0x%08x\n", regs[j].value);
+ ++j;
+ }
+ }
+ return 0;
+}
+
+#endif /* CONFIG_DRM_I915_CAPTURE_ERROR */
+
+void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
+{
+ if (!ee || !ee->guc_capture_node)
+ return;
+
+ guc_capture_add_node_to_cachelist(ee->capture, ee->guc_capture_node);
+ ee->capture = NULL;
+ ee->guc_capture_node = NULL;
+}
+
+void intel_guc_capture_get_matching_node(struct intel_gt *gt,
+ struct intel_engine_coredump *ee,
+ struct intel_context *ce)
+{
+ struct __guc_capture_parsed_output *n, *ntmp;
+ struct drm_i915_private *i915;
+ struct intel_guc *guc;
+
+ if (!gt || !ee || !ce)
+ return;
+
+ i915 = gt->i915;
+ guc = &gt->uc.guc;
+ if (!guc->capture)
+ return;
+
+ GEM_BUG_ON(ee->guc_capture_node);
+ /*
+ * Look for a matching GuC reported error capture node from
+ * the internal output link-list based on lrca, guc-id and engine
+ * identification.
+ */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+ if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
+ n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
+ n->guc_id && n->guc_id == ce->guc_id.id &&
+ (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) ==
+ (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ list_del(&n->link);
+ ee->guc_capture_node = n;
+ ee->capture = guc->capture;
+ return;
+ }
+ }
+ drm_dbg(&i915->drm, "GuC capture can't match ee to node\n");
+}
+
+void intel_guc_capture_process(struct intel_guc *guc)
+{
+ if (guc->capture)
+ __guc_capture_process_output(guc);
+}
+
+static void
+guc_capture_free_ads_cache(struct intel_guc_state_capture *gc)
+{
+ int i, j, k;
+ struct __guc_capture_ads_cache *cache;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+ for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
+ for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
+ cache = &gc->ads_cache[i][j][k];
+ if (cache->is_valid)
+ kfree(cache->ptr);
+ }
+ }
+ }
+ kfree(gc->ads_null_cache);
+}
+
+void intel_guc_capture_destroy(struct intel_guc *guc)
+{
+ if (!guc->capture)
+ return;
+
+ guc_capture_free_ads_cache(guc->capture);
+
+ guc_capture_delete_prealloc_nodes(guc);
+
+ guc_capture_free_extlists(guc->capture->extlists);
+ kfree(guc->capture->extlists);
+
+ kfree(guc->capture);
+ guc->capture = NULL;
+}
+
+int intel_guc_capture_init(struct intel_guc *guc)
+{
+ guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL);
+ if (!guc->capture)
+ return -ENOMEM;
+
+ guc->capture->reglists = guc_capture_get_device_reglist(guc);
+
+ INIT_LIST_HEAD(&guc->capture->outlist);
+ INIT_LIST_HEAD(&guc->capture->cachelist);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
new file mode 100644
index 000000000000..d3d7bd0b6db6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_CAPTURE_H
+#define _INTEL_GUC_CAPTURE_H
+
+#include <linux/types.h>
+
+struct drm_i915_error_state_buf;
+struct guc_gt_system_info;
+struct intel_engine_coredump;
+struct intel_context;
+struct intel_gt;
+struct intel_guc;
+
+void intel_guc_capture_free_node(struct intel_engine_coredump *ee);
+int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_coredump *ee);
+void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
+ struct intel_context *ce);
+void intel_guc_capture_process(struct intel_guc *guc);
+int intel_guc_capture_output_min_size_est(struct intel_guc *guc);
+int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ void **outptr);
+int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size);
+int intel_guc_capture_getnullheader(struct intel_guc *guc, void **outptr, size_t *size);
+void intel_guc_capture_destroy(struct intel_guc *guc);
+int intel_guc_capture_init(struct intel_guc *guc);
+
+#endif /* _INTEL_GUC_CAPTURE_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 2f7fc87a78e1..f01325cd1b62 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -6,6 +6,7 @@
#include <linux/circ_buf.h>
#include <linux/ktime.h>
#include <linux/time64.h>
+#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include "i915_drv.h"
@@ -170,7 +171,7 @@ static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
if (unlikely(err))
CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
- enabledisable(enable), ERR_PTR(err));
+ str_enable_disable(enable), ERR_PTR(err));
return err;
}
@@ -1202,7 +1203,7 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
void intel_guc_ct_print_info(struct intel_guc_ct *ct,
struct drm_printer *p)
{
- drm_printf(p, "CT %s\n", enableddisabled(ct->enabled));
+ drm_printf(p, "CT %s\n", str_enabled_disabled(ct->enabled));
if (!ct->enabled)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 4b300b6cc0f9..42cb7a9a6199 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -32,8 +32,8 @@
#define GUC_CLIENT_PRIORITY_NORMAL 3
#define GUC_CLIENT_PRIORITY_NUM 4
-#define GUC_MAX_LRC_DESCRIPTORS 65535
-#define GUC_INVALID_LRC_ID GUC_MAX_LRC_DESCRIPTORS
+#define GUC_MAX_CONTEXT_ID 65535
+#define GUC_INVALID_CONTEXT_ID GUC_MAX_CONTEXT_ID
#define GUC_RENDER_ENGINE 0
#define GUC_VIDEO_ENGINE 1
@@ -98,7 +98,13 @@
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_WA 1
-#define GUC_WA_POLLCS BIT(18)
+#define GUC_WA_GAM_CREDITS BIT(10)
+#define GUC_WA_DUAL_QUEUE BIT(11)
+#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
+#define GUC_WA_POLLCS BIT(18)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
@@ -197,54 +203,45 @@ struct guc_wq_item {
u32 fence_id;
} __packed;
-struct guc_process_desc {
- u32 stage_id;
- u64 db_base_addr;
+struct guc_sched_wq_desc {
u32 head;
u32 tail;
u32 error_offset;
- u64 wq_base_addr;
- u32 wq_size_bytes;
u32 wq_status;
- u32 engine_presence;
- u32 priority;
- u32 reserved[36];
+ u32 reserved[28];
} __packed;
+/* Helper for context registration H2G */
+struct guc_ctxt_registration_info {
+ u32 flags;
+ u32 context_idx;
+ u32 engine_class;
+ u32 engine_submit_mask;
+ u32 wq_desc_lo;
+ u32 wq_desc_hi;
+ u32 wq_base_lo;
+ u32 wq_base_hi;
+ u32 wq_size;
+ u32 hwlrca_lo;
+ u32 hwlrca_hi;
+};
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
-#define CONTEXT_POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
-#define CONTEXT_POLICY_DEFAULT_PREEMPTION_TIME_US 500000
+/* 32-bit KLV structure as used by policy updates and others */
+struct guc_klv_generic_dw_t {
+ u32 kl;
+ u32 value;
+} __packed;
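+
+/*
+ * Illustrative sketch only (assuming the GUC_KLV_0_KEY/GUC_KLV_0_LEN
+ * masks from the GuC KLV ABI): a one-dword policy KLV would be built as
+ *
+ *	klv.kl = FIELD_PREP(GUC_KLV_0_KEY, key_id) |
+ *		 FIELD_PREP(GUC_KLV_0_LEN, 1);
+ *	klv.value = value_in_dw;
+ *
+ * where key_id and value_in_dw are placeholders for the policy being set.
+ */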
-/* Preempt to idle on quantum expiry */
-#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE BIT(0)
+/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
+struct guc_update_context_policy_header {
+ u32 action;
+ u32 ctx_id;
+} __packed;
-/*
- * GuC Context registration descriptor.
- * FIXME: This is only required to exist during context registration.
- * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
- * is not required.
- */
-struct guc_lrc_desc {
- u32 hw_context_desc;
- u32 slpm_perf_mode_hint; /* SPLC v1 only */
- u32 slpm_freq_hint;
- u32 engine_submit_mask; /* In logical space */
- u8 engine_class;
- u8 reserved0[3];
- u32 priority;
- u32 process_desc;
- u32 wq_addr;
- u32 wq_size;
- u32 context_flags; /* CONTEXT_REGISTRATION_* */
- /* Time for one workload to execute. (in micro seconds) */
- u32 execution_quantum;
- /* Time to wait for a preemption request to complete before issuing a
- * reset. (in micro seconds).
- */
- u32 preemption_timeout;
- u32 policy_flags; /* CONTEXT_POLICY_* */
- u32 reserved1[19];
+struct guc_update_context_policy {
+ struct guc_update_context_policy_header header;
+ struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
} __packed;
#define GUC_POWER_UNSPECIFIED 0
@@ -285,10 +282,13 @@ struct guc_mmio_reg {
u32 offset;
u32 value;
u32 flags;
- u32 mask;
#define GUC_REGSET_MASKED BIT(0)
+#define GUC_REGSET_NEEDS_STEERING BIT(1)
#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
#define GUC_REGSET_RESTORE_ONLY BIT(3)
+#define GUC_REGSET_STEERING_GROUP GENMASK(15, 12)
+#define GUC_REGSET_STEERING_INSTANCE GENMASK(23, 20)
+ u32 mask;
} __packed;
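+
+/*
+ * Illustrative only: a steered register entry would pack its steering
+ * group/instance into @flags, e.g.
+ *
+ *	reg->flags = GUC_REGSET_NEEDS_STEERING |
+ *		     FIELD_PREP(GUC_REGSET_STEERING_GROUP, group) |
+ *		     FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+ */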
/* GuC register sets */
@@ -311,6 +311,14 @@ enum {
GUC_CAPTURE_LIST_INDEX_MAX = 2,
};
+/* Register types of GuC capture register lists */
+enum guc_capture_type {
+ GUC_CAPTURE_LIST_TYPE_GLOBAL = 0,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ GUC_CAPTURE_LIST_TYPE_MAX,
+};
+
/* GuC Additional Data Struct */
struct guc_ads {
struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
new file mode 100644
index 000000000000..79c66b6b51a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "gt/intel_hwconfig.h"
+#include "i915_drv.h"
+#include "i915_memcpy.h"
+
+/*
+ * GuC has a blob containing hardware configuration information (HWConfig).
+ * This is formatted as a simple and flexible KLV (Key/Length/Value) table.
+ *
+ * For example, a minimal version could be:
+ * enum device_attr {
+ * ATTR_SOME_VALUE = 0,
+ * ATTR_SOME_MASK = 1,
+ * };
+ *
+ * static const u32 hwconfig[] = {
+ * ATTR_SOME_VALUE,
+ * 1, // Value Length in DWords
+ * 8, // Value
+ *
+ * ATTR_SOME_MASK,
+ * 3,
+ * 0x00FFFFFF, 0xFFFFFFFF, 0xFF000000,
+ * };
+ *
+ * The attribute ids are defined in a hardware spec.
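+ *
+ * A minimal walker for such a table might look like the sketch below
+ * (illustrative only; consume_attribute() is a placeholder, not part
+ * of this interface):
+ *
+ *	static void walk_klv_table(const u32 *p, u32 len_dw)
+ *	{
+ *		u32 i = 0;
+ *
+ *		while (i + 2 <= len_dw) {
+ *			u32 key = p[i];
+ *			u32 len = p[i + 1];	// value length in dwords
+ *
+ *			consume_attribute(key, &p[i + 2], len);
+ *			i += 2 + len;
+ *		}
+ *	}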
+ */
+
+static int __guc_action_get_hwconfig(struct intel_guc *guc,
+ u32 ggtt_offset, u32 ggtt_size)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_GET_HWCONFIG,
+ lower_32_bits(ggtt_offset),
+ upper_32_bits(ggtt_offset),
+ ggtt_size,
+ };
+ int ret;
+
+ ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+ if (ret == -ENXIO)
+ return -ENOENT;
+
+ return ret;
+}
+
+static int guc_hwconfig_discover_size(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
+{
+ int ret;
+
+ /*
+ * Sending a query with zero offset and size will return the
+ * size of the blob.
+ */
+ ret = __guc_action_get_hwconfig(guc, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ return -EINVAL;
+
+ hwconfig->size = ret;
+ return 0;
+}
+
+static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
+{
+ struct i915_vma *vma;
+ u32 ggtt_offset;
+ void *vaddr;
+ int ret;
+
+ GEM_BUG_ON(!hwconfig->size);
+
+ ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr);
+ if (ret)
+ return ret;
+
+ ggtt_offset = intel_guc_ggtt_offset(guc, vma);
+
+ ret = __guc_action_get_hwconfig(guc, ggtt_offset, hwconfig->size);
+ if (ret >= 0)
+ memcpy(hwconfig->ptr, vaddr, hwconfig->size);
+
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+
+ return ret;
+}
+
+static bool has_table(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return true;
+ if (IS_DG2(i915))
+ return true;
+
+ return false;
+}
+
+/**
+ * guc_hwconfig_init - Initialize the HWConfig
+ *
+ * Retrieve the HWConfig table from the GuC and save it locally.
+ * It can then be queried on demand by other users later on.
+ */
+static int guc_hwconfig_init(struct intel_gt *gt)
+{
+ struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
+ struct intel_guc *guc = &gt->uc.guc;
+ int ret;
+
+ if (!has_table(gt->i915))
+ return 0;
+
+ ret = guc_hwconfig_discover_size(guc, hwconfig);
+ if (ret)
+ return ret;
+
+ hwconfig->ptr = kmalloc(hwconfig->size, GFP_KERNEL);
+ if (!hwconfig->ptr) {
+ hwconfig->size = 0;
+ return -ENOMEM;
+ }
+
+ ret = guc_hwconfig_fill_buffer(guc, hwconfig);
+ if (ret < 0) {
+ intel_gt_fini_hwconfig(gt);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_gt_init_hwconfig - Initialize the HWConfig if available
+ *
+ * Retrieve the HWConfig table if available on the current platform.
+ */
+int intel_gt_init_hwconfig(struct intel_gt *gt)
+{
+ if (!intel_uc_uses_guc(&gt->uc))
+ return 0;
+
+ return guc_hwconfig_init(gt);
+}
+
+/**
+ * intel_gt_fini_hwconfig - Finalize the HWConfig
+ *
+ * Free up the memory allocation holding the table.
+ */
+void intel_gt_fini_hwconfig(struct intel_gt *gt)
+{
+ struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
+
+ kfree(hwconfig->ptr);
+ hwconfig->size = 0;
+ hwconfig->ptr = NULL;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index b53f61f3101f..78d2989fe917 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -4,14 +4,16 @@
*/
#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
+#include "intel_guc_capture.h"
#include "intel_guc_log.h"
-static void guc_log_capture_logs(struct intel_guc_log *log);
+static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
/**
* DOC: GuC firmware log
@@ -25,7 +27,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log);
static int guc_action_flush_log_complete(struct intel_guc *guc)
{
u32 action[] = {
- INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
+ GUC_DEBUG_LOG_BUFFER
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -136,7 +139,7 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(log->relay.channel, log->vma->obj->base.size);
+ relay_reserve(log->relay.channel, log->vma->obj->base.size - CAPTURE_BUFFER_SIZE);
/* Switch to the next sub buffer */
relay_flush(log->relay.channel);
@@ -156,9 +159,9 @@ static void *guc_get_write_buffer(struct intel_guc_log *log)
return relay_reserve(log->relay.channel, 0);
}
-static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
- enum guc_log_buffer_type type,
- unsigned int full_cnt)
+bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
+ enum guc_log_buffer_type type,
+ unsigned int full_cnt)
{
unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
bool overflow = false;
@@ -181,7 +184,7 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
return overflow;
}
-static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_DEBUG_LOG_BUFFER:
@@ -197,7 +200,21 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
return 0;
}
-static void guc_read_update_log_buffer(struct intel_guc_log *log)
+size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
+{
+ enum guc_log_buffer_type i;
+ size_t offset = PAGE_SIZE; /* for the log_buffer_states */
+
+ for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
+ if (i == type)
+ break;
+ offset += intel_guc_get_log_buffer_size(i);
+ }
+
+ return offset;
+}
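+
+/*
+ * For example (following the enum guc_log_buffer_type ordering above):
+ * intel_guc_get_log_buffer_offset(GUC_CAPTURE_LOG_BUFFER) evaluates to
+ * PAGE_SIZE + DEBUG_BUFFER_SIZE + CRASH_BUFFER_SIZE, i.e. the state
+ * page plus the sizes of all buffer types that precede the capture one.
+ */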
+
+static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
@@ -212,7 +229,8 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
- log_buf_state = src_data = log->relay.buf_addr;
+ src_data = log->buf_addr;
+ log_buf_state = src_data;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
@@ -222,7 +240,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
* Used rate limited to avoid deluge of messages, logs might be
* getting consumed by User at a slow rate.
*/
- DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n");
log->relay.full_count++;
goto out_unlock;
@@ -232,7 +250,8 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
src_data += PAGE_SIZE;
dst_data += PAGE_SIZE;
- for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ /* For relay logging, we exclude error state capture */
+ for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) {
/*
* Make a copy of the state structure, inside GuC log buffer
* (which is uncached mapped), on the stack to avoid reading
@@ -240,14 +259,14 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
- buffer_size = guc_get_log_buffer_size(type);
+ buffer_size = intel_guc_get_log_buffer_size(type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
/* Bookkeeping stuff */
log->stats[type].flush += log_buf_state_local.flush_to_file;
- new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);
+ new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt);
/* Update the state of shared log buffer */
log_buf_state->read_ptr = write_offset;
@@ -300,49 +319,43 @@ out_unlock:
mutex_unlock(&log->relay.lock);
}
-static void capture_logs_work(struct work_struct *work)
+static void copy_debug_logs_work(struct work_struct *work)
{
struct intel_guc_log *log =
container_of(work, struct intel_guc_log, relay.flush_work);
- guc_log_capture_logs(log);
+ guc_log_copy_debuglogs_for_relay(log);
}
-static int guc_log_map(struct intel_guc_log *log)
+static int guc_log_relay_map(struct intel_guc_log *log)
{
- void *vaddr;
-
lockdep_assert_held(&log->relay.lock);
- if (!log->vma)
+ if (!log->vma || !log->buf_addr)
return -ENODEV;
/*
- * Create a WC (Uncached for read) vmalloc mapping of log
- * buffer pages, so that we can directly get the data
- * (up-to-date) from memory.
+ * The WC vmalloc mapping of the log buffer pages was done at
+ * GuC log init time, but let's keep a ref for bookkeeping.
*/
- vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
-
- log->relay.buf_addr = vaddr;
+ i915_gem_object_get(log->vma->obj);
+ log->relay.buf_in_use = true;
return 0;
}
-static void guc_log_unmap(struct intel_guc_log *log)
+static void guc_log_relay_unmap(struct intel_guc_log *log)
{
lockdep_assert_held(&log->relay.lock);
- i915_gem_object_unpin_map(log->vma->obj);
- log->relay.buf_addr = NULL;
+ i915_gem_object_put(log->vma->obj);
+ log->relay.buf_in_use = false;
}
void intel_guc_log_init_early(struct intel_guc_log *log)
{
mutex_init(&log->relay.lock);
- INIT_WORK(&log->relay.flush_work, capture_logs_work);
+ INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
log->relay.started = false;
}
@@ -357,8 +370,11 @@ static int guc_log_relay_create(struct intel_guc_log *log)
lockdep_assert_held(&log->relay.lock);
GEM_BUG_ON(!log->vma);
- /* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = log->vma->size;
+ /*
+ * Keep the size of sub buffers same as shared log buffer
+ * but GuC log-events excludes the error-state-capture logs
+ */
+ subbuf_size = log->vma->size - CAPTURE_BUFFER_SIZE;
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -393,13 +409,13 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
log->relay.channel = NULL;
}
-static void guc_log_capture_logs(struct intel_guc_log *log)
+static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
- guc_read_update_log_buffer(log);
+ _guc_log_copy_debuglogs_for_relay(log);
/*
* Generally device is expected to be active only at this
@@ -439,6 +455,7 @@ int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct i915_vma *vma;
+ void *vaddr;
u32 guc_log_size;
int ret;
@@ -446,23 +463,28 @@ int intel_guc_log_create(struct intel_guc_log *log)
/*
* GuC Log buffer Layout
+ * (this ordering must follow "enum guc_log_buffer_type" definition)
*
* +===============================+ 00B
- * | Crash dump state header |
- * +-------------------------------+ 32B
* | Debug state header |
+ * +-------------------------------+ 32B
+ * | Crash dump state header |
* +-------------------------------+ 64B
* | Capture state header |
* +-------------------------------+ 96B
* | |
* +===============================+ PAGE_SIZE (4KB)
- * | Crash Dump logs |
- * +===============================+ + CRASH_SIZE
* | Debug logs |
* +===============================+ + DEBUG_SIZE
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
* | Capture logs |
* +===============================+ + CAPTURE_SIZE
*/
+ if (intel_guc_capture_output_min_size_est(guc) > CAPTURE_BUFFER_SIZE)
+ DRM_WARN("GuC log buffer for state_capture may be too small. %d < %d\n",
+ CAPTURE_BUFFER_SIZE, intel_guc_capture_output_min_size_est(guc));
+
guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
CAPTURE_BUFFER_SIZE;
@@ -473,23 +495,35 @@ int intel_guc_log_create(struct intel_guc_log *log)
}
log->vma = vma;
+ /*
+ * Create a WC (Uncached for read) vmalloc mapping up front for
+ * immediate access to data from memory during critical events
+ * such as error capture.
+ */
+ vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ i915_vma_unpin_and_release(&log->vma, 0);
+ goto err;
+ }
+ log->buf_addr = vaddr;
log->level = __get_default_log_level(log);
DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
- log->level, enableddisabled(log->level),
- yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
+ log->level, str_enabled_disabled(log->level),
+ str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
return 0;
err:
- DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
+ DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret);
return ret;
}
void intel_guc_log_destroy(struct intel_guc_log *log)
{
- i915_vma_unpin_and_release(&log->vma, 0);
+ log->buf_addr = NULL;
+ i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP);
}
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
@@ -534,7 +568,7 @@ out_unlock:
bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
- return log->relay.buf_addr;
+ return log->buf_addr;
}
int intel_guc_log_relay_open(struct intel_guc_log *log)
@@ -565,7 +599,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
if (ret)
goto out_unlock;
- ret = guc_log_map(log);
+ ret = guc_log_relay_map(log);
if (ret)
goto out_relay;
@@ -615,8 +649,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
guc_action_flush_log(guc);
- /* GuC would have updated log buffer by now, so capture it */
- guc_log_capture_logs(log);
+ /* GuC would have updated log buffer by now, so copy it */
+ guc_log_copy_debuglogs_for_relay(log);
}
/*
@@ -645,7 +679,7 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
GEM_BUG_ON(!intel_guc_log_relay_created(log));
- guc_log_unmap(log);
+ guc_log_relay_unmap(log);
guc_log_relay_destroy(log);
mutex_unlock(&log->relay.lock);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index d7e1b6471fed..18007e639be9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -49,8 +49,9 @@ struct intel_guc;
struct intel_guc_log {
u32 level;
struct i915_vma *vma;
+ void *buf_addr;
struct {
- void *buf_addr;
+ bool buf_in_use;
bool started;
struct work_struct flush_work;
struct rchan *channel;
@@ -66,6 +67,10 @@ struct intel_guc_log {
};
void intel_guc_log_init_early(struct intel_guc_log *log);
+bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type,
+ unsigned int full_cnt);
+unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type);
+size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type);
int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index fc805d466d99..e00661fb0853 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include "intel_guc_rc.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
@@ -59,12 +61,12 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable)
ret = guc_action_control_gucrc(guc, enable);
if (ret) {
drm_err(drm, "Failed to %s GuC RC (%pe)\n",
- enabledisable(enable), ERR_PTR(ret));
+ str_enable_disable(enable), ERR_PTR(ret));
return ret;
}
drm_info(&gt->i915->drm, "GuC RC: %s\n",
- enableddisabled(enable));
+ str_enabled_disabled(enable));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 66027a42cda9..ad570fa002a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -28,7 +28,7 @@
#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT)
#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT)
#define GS_AUTH_STATUS_SHIFT 30
-#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT)
+#define GS_AUTH_STATUS_MASK (0x03U << GS_AUTH_STATUS_SHIFT)
#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT)
#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index ac749ab11035..1db833da42df 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_cache.h>
+#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -152,8 +153,8 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc)
ret = guc_action_slpc_query(guc, offset);
if (unlikely(ret))
- drm_err(&i915->drm, "Failed to query task state (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to query task state (%pe)\n",
+ ERR_PTR(ret));
drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
@@ -170,8 +171,8 @@ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
ret = guc_action_slpc_set_param(guc, id, value);
if (ret)
- drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n",
- id, value, ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
+ id, value, ERR_PTR(ret));
return ret;
}
@@ -211,8 +212,8 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
freq);
if (ret)
- drm_err(&i915->drm, "Unable to force min freq to %u: %d",
- freq, ret);
+ i915_probe_error(i915, "Unable to force min freq to %u: %d",
+ freq, ret);
}
return ret;
@@ -247,9 +248,9 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
if (unlikely(err)) {
- drm_err(&i915->drm,
- "Failed to allocate SLPC struct (err=%pe)\n",
- ERR_PTR(err));
+ i915_probe_error(i915,
+ "Failed to allocate SLPC struct (err=%pe)\n",
+ ERR_PTR(err));
return err;
}
@@ -316,15 +317,15 @@ static int slpc_reset(struct intel_guc_slpc *slpc)
ret = guc_action_slpc_reset(guc, offset);
if (unlikely(ret < 0)) {
- drm_err(&i915->drm, "SLPC reset action failed (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
if (!ret) {
if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
- drm_err(&i915->drm, "SLPC not enabled! State = %s\n",
- slpc_get_state_string(slpc));
+ i915_probe_error(i915, "SLPC not enabled! State = %s\n",
+ slpc_get_state_string(slpc));
return -EIO;
}
}
@@ -581,16 +582,12 @@ static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
- u32 rp_state_cap;
+ struct intel_rps_freq_caps caps;
- rp_state_cap = intel_rps_read_state_cap(rps);
-
- slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) *
- GT_FREQUENCY_MULTIPLIER;
- slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) *
- GT_FREQUENCY_MULTIPLIER;
- slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
- GT_FREQUENCY_MULTIPLIER;
+ gen6_rps_get_freq_caps(rps, &caps);
+ slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
+ slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
+ slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);
if (!slpc->boost_freq)
slpc->boost_freq = slpc->rp0_freq;
@@ -620,8 +617,8 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
- drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
@@ -636,24 +633,24 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Ignore efficient freq and set min to platform min */
ret = slpc_ignore_eff_freq(slpc, true);
if (unlikely(ret)) {
- drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to set SLPC min to RPn (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
- drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
- drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
@@ -719,7 +716,7 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p
drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
drm_printf(p, "\tGTPERF task active: %s\n",
- yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
+ str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
drm_printf(p, "\tMax freq: %u MHz\n",
slpc_decode_max_freq(slpc));
drm_printf(p, "\tMin freq: %u MHz\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 28f9aac0201d..1726f0f19901 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -25,6 +25,7 @@
#include "gt/intel_ring.h"
#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -161,7 +162,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
#define SCHED_STATE_ENABLED BIT(4)
#define SCHED_STATE_PENDING_ENABLE BIT(5)
#define SCHED_STATE_REGISTERED BIT(6)
-#define SCHED_STATE_BLOCKED_SHIFT 7
+#define SCHED_STATE_POLICY_REQUIRED BIT(7)
+#define SCHED_STATE_BLOCKED_SHIFT 8
#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
@@ -300,6 +302,23 @@ static inline void clr_context_registered(struct intel_context *ce)
ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
}
+static inline bool context_policy_required(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
+}
+
+static inline void set_context_policy_required(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
+}
+
+static inline void clr_context_policy_required(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
+}
+
static inline u32 context_blocked(struct intel_context *ce)
{
return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
@@ -351,12 +370,12 @@ request_to_scheduling_context(struct i915_request *rq)
static inline bool context_guc_id_invalid(struct intel_context *ce)
{
- return ce->guc_id.id == GUC_INVALID_LRC_ID;
+ return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
}
static inline void set_context_guc_id_invalid(struct intel_context *ce)
{
- ce->guc_id.id = GUC_INVALID_LRC_ID;
+ ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
}
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
@@ -395,12 +414,12 @@ struct sync_semaphore {
};
struct parent_scratch {
- struct guc_process_desc pdesc;
+ struct guc_sched_wq_desc wq_desc;
struct sync_semaphore go;
struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
- u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) -
+ u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
u32 wq[WQ_SIZE / sizeof(u32)];
@@ -437,15 +456,15 @@ __get_parent_scratch(struct intel_context *ce)
LRC_STATE_OFFSET) / sizeof(u32)));
}
-static struct guc_process_desc *
-__get_process_desc(struct intel_context *ce)
+static struct guc_sched_wq_desc *
+__get_wq_desc(struct intel_context *ce)
{
struct parent_scratch *ps = __get_parent_scratch(ce);
- return &ps->pdesc;
+ return &ps->wq_desc;
}
-static u32 *get_wq_pointer(struct guc_process_desc *desc,
+static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
struct intel_context *ce,
u32 wqi_size)
{
@@ -457,7 +476,7 @@ static u32 *get_wq_pointer(struct guc_process_desc *desc,
#define AVAILABLE_SPACE \
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
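+	/* AVAILABLE_SPACE: bytes the producer may write before reaching the cached head */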
if (wqi_size > AVAILABLE_SPACE) {
- ce->parallel.guc.wqi_head = READ_ONCE(desc->head);
+ ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
if (wqi_size > AVAILABLE_SPACE)
return NULL;
@@ -467,75 +486,27 @@ static u32 *get_wq_pointer(struct guc_process_desc *desc,
return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
}
-static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
-{
- struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
-
- GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS);
-
- return &base[index];
-}
-
static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
{
struct intel_context *ce = xa_load(&guc->context_lookup, id);
- GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS);
+ GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID);
return ce;
}
-static int guc_lrc_desc_pool_create(struct intel_guc *guc)
-{
- u32 size;
- int ret;
-
- size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) *
- GUC_MAX_LRC_DESCRIPTORS);
- ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
- (void **)&guc->lrc_desc_pool_vaddr);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
-{
- guc->lrc_desc_pool_vaddr = NULL;
- i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
-}
-
static inline bool guc_submission_initialized(struct intel_guc *guc)
{
- return !!guc->lrc_desc_pool_vaddr;
-}
-
-static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
-{
- if (likely(guc_submission_initialized(guc))) {
- struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
- unsigned long flags;
-
- memset(desc, 0, sizeof(*desc));
-
- /*
- * xarray API doesn't have xa_erase_irqsave wrapper, so calling
- * the lower level functions directly.
- */
- xa_lock_irqsave(&guc->context_lookup, flags);
- __xa_erase(&guc->context_lookup, id);
- xa_unlock_irqrestore(&guc->context_lookup, flags);
- }
+ return guc->submission_initialized;
}
-static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
+static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{
return __get_context(guc, id);
}
-static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
- struct intel_context *ce)
+static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
+ struct intel_context *ce)
{
unsigned long flags;
@@ -548,6 +519,22 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
+static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
+{
+ unsigned long flags;
+
+ if (unlikely(!guc_submission_initialized(guc)))
+ return;
+
+ /*
+ * xarray API doesn't have xa_erase_irqsave wrapper, so calling
+ * the lower level functions directly.
+ */
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ __xa_erase(&guc->context_lookup, id);
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
static void decr_outstanding_submission_g2h(struct intel_guc *guc)
{
if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
@@ -624,7 +611,8 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
true, timeout);
}
-static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
+static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int try_context_registration(struct intel_context *ce, bool loop);
static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
@@ -650,6 +638,12 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
+ if (context_policy_required(ce)) {
+ err = guc_context_policy_init(ce, false);
+ if (err)
+ return err;
+ }
+
spin_lock(&ce->guc_state.lock);
/*
@@ -743,7 +737,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
return (WQ_SIZE - ce->parallel.guc.wqi_tail);
}
-static void write_wqi(struct guc_process_desc *desc,
+static void write_wqi(struct guc_sched_wq_desc *wq_desc,
struct intel_context *ce,
u32 wqi_size)
{
@@ -756,13 +750,13 @@ static void write_wqi(struct guc_process_desc *desc,
ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
(WQ_SIZE - 1);
- WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail);
+ WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
}
static int guc_wq_noop_append(struct intel_context *ce)
{
- struct guc_process_desc *desc = __get_process_desc(ce);
- u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce));
+ struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
+ u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
if (!wqi)
@@ -781,7 +775,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_context *child;
- struct guc_process_desc *desc = __get_process_desc(ce);
+ struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
unsigned int wqi_size = (ce->parallel.number_children + 4) *
sizeof(u32);
u32 *wqi;
@@ -792,7 +786,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
- GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
+ GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
/* Insert NOOP if this work queue item will wrap the tail pointer. */
if (wqi_size > wq_space_until_wrap(ce)) {
@@ -801,7 +795,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
return ret;
}
- wqi = get_wq_pointer(desc, ce, wqi_size);
+ wqi = get_wq_pointer(wq_desc, ce, wqi_size);
if (!wqi)
return -EBUSY;
@@ -816,7 +810,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
for_each_child(ce, child)
*wqi++ = child->ring->tail / sizeof(u64);
- write_wqi(desc, ce, wqi_size);
+ write_wqi(wq_desc, ce, wqi_size);
return 0;
}
@@ -920,9 +914,9 @@ register_context:
if (submit) {
struct intel_context *ce = request_to_scheduling_context(last);
- if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
+ if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
!intel_context_is_banned(ce))) {
- ret = guc_lrc_desc_pin(ce, false);
+ ret = try_context_registration(ce, false);
if (unlikely(ret == -EPIPE)) {
goto deadlk;
} else if (ret == -EBUSY) {
@@ -1206,20 +1200,6 @@ static u32 gpm_timestamp_shift(struct intel_gt *gt)
return 3 - shift;
}
-static u64 gpm_timestamp(struct intel_gt *gt)
-{
- u32 lo, hi, old_hi, loop = 0;
-
- hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
- do {
- lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
- old_hi = hi;
- hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
- } while (old_hi != hi && loop++ < 2);
-
- return ((u64)hi << 32) | lo;
-}
-
static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1229,7 +1209,8 @@ static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
lockdep_assert_held(&guc->timestamp.lock);
gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
- gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
+ gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0,
+ MISC_STATUS1) >> guc->timestamp.shift;
gt_stamp_lo = lower_32_bits(gpm_ts);
*now = ktime_get();
@@ -1546,6 +1527,89 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
lrc_update_regs(ce, engine, head);
}
+static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
+{
+ static const i915_reg_t _reg[I915_NUM_ENGINES] = {
+ [RCS0] = MSG_IDLE_CS,
+ [BCS0] = MSG_IDLE_BCS,
+ [VCS0] = MSG_IDLE_VCS0,
+ [VCS1] = MSG_IDLE_VCS1,
+ [VCS2] = MSG_IDLE_VCS2,
+ [VCS3] = MSG_IDLE_VCS3,
+ [VCS4] = MSG_IDLE_VCS4,
+ [VCS5] = MSG_IDLE_VCS5,
+ [VCS6] = MSG_IDLE_VCS6,
+ [VCS7] = MSG_IDLE_VCS7,
+ [VECS0] = MSG_IDLE_VECS0,
+ [VECS1] = MSG_IDLE_VECS1,
+ [VECS2] = MSG_IDLE_VECS2,
+ [VECS3] = MSG_IDLE_VECS3,
+ [CCS0] = MSG_IDLE_CS,
+ [CCS1] = MSG_IDLE_CS,
+ [CCS2] = MSG_IDLE_CS,
+ [CCS3] = MSG_IDLE_CS,
+ };
+ u32 val;
+
+ if (!_reg[engine->id].reg)
+ return 0;
+
+ val = intel_uncore_read(engine->uncore, _reg[engine->id]);
+
+ /*
+ * The valid bits[29:25] mask the pending-forcewake bits[13:9]:
+ * (val >> 16) lines the valid bits up over the pending bits, and
+ * the result is shifted down into bits[4:0].
+ */
+ return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
+}
+
+static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
+{
+ int ret;
+
+ /* Ensure GPM receives fw up/down after CS is stopped */
+ udelay(1);
+
+ /* Wait for forcewake request to complete in GPM */
+ ret = __intel_wait_for_register_fw(gt->uncore,
+ GEN9_PWRGT_DOMAIN_STATUS,
+ fw_mask, fw_mask, 5000, 0, NULL);
+
+ /* Ensure CS receives fw ack from GPM */
+ udelay(1);
+
+ if (ret)
+ GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
+}
+
+/*
+ * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
+ * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
+ * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
+ * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
+ * are concerned only with the gt reset here, we use a logical OR of pending
+ * forcewakeups from all reset domains and then wait for them to complete by
+ * querying PWRGT_DOMAIN_STATUS.
+ */
+static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
+{
+ u32 fw_pending;
+
+ if (GRAPHICS_VER(engine->i915) != 12)
+ return;
+
+ /*
+ * Wa_22011802037
+ * TODO: Occasionally trying to stop the cs times out, but does not
+ * adversely affect functionality. The timeout is set as a config
+ * parameter that defaults to 100ms. Assuming that this timeout is
+ * sufficient for any pending MI_FORCEWAKEs to complete, ignore the
+ * timeout returned here until it is root caused.
+ */
+ intel_engine_stop_cs(engine);
+
+ fw_pending = __cs_pending_mi_force_wakes(engine);
+ if (fw_pending)
+ __gpm_wait_for_fw_complete(engine->gt, fw_pending);
+}
+
static void guc_reset_nop(struct intel_engine_cs *engine)
{
}
@@ -1804,20 +1868,10 @@ static void reset_fail_worker_func(struct work_struct *w);
int intel_guc_submission_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- int ret;
- if (guc->lrc_desc_pool)
+ if (guc->submission_initialized)
return 0;
- ret = guc_lrc_desc_pool_create(guc);
- if (ret)
- return ret;
- /*
- * Keep static analysers happy, let them know that we allocated the
- * vma after testing that it didn't exist earlier.
- */
- GEM_BUG_ON(!guc->lrc_desc_pool);
-
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap)
@@ -1825,19 +1879,20 @@ int intel_guc_submission_init(struct intel_guc *guc)
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
guc->timestamp.shift = gpm_timestamp_shift(gt);
+ guc->submission_initialized = true;
return 0;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
- if (!guc->lrc_desc_pool)
+ if (!guc->submission_initialized)
return;
guc_flush_destroyed_contexts(guc);
- guc_lrc_desc_pool_destroy(guc);
i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
+ guc->submission_initialized = false;
}
static inline void queue_request(struct i915_sched_engine *sched_engine,
@@ -1884,7 +1939,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
return submission_disabled(guc) || guc->stalled_request ||
!i915_sched_engine_is_empty(sched_engine) ||
- !lrc_desc_registered(guc, ce->guc_id.id);
+ !ctx_id_mapped(guc, ce->guc_id.id);
}
static void guc_submit_request(struct i915_request *rq)
@@ -1941,7 +1996,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
else
ida_simple_remove(&guc->submission_state.guc_ids,
ce->guc_id.id);
- reset_lrc_desc(guc, ce->guc_id.id);
+ clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
}
if (!list_empty(&ce->guc_id.link))
@@ -2094,65 +2149,96 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
static int __guc_action_register_multi_lrc(struct intel_guc *guc,
struct intel_context *ce,
- u32 guc_id,
- u32 offset,
+ struct guc_ctxt_registration_info *info,
bool loop)
{
struct intel_context *child;
- u32 action[4 + MAX_ENGINE_INSTANCE];
+ u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
int len = 0;
+ u32 next_id;
GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
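+ /*
+ * The action buffer is sized 13 + 2 * MAX_ENGINE_INSTANCE: 13 fixed
+ * dwords (action, flags, context id, engine class and submit mask,
+ * 64-bit wq descriptor and base, wq size, child count, 64-bit parent
+ * LRCA) plus two LRCA dwords appended per child below.
+ */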
action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
- action[len++] = guc_id;
+ action[len++] = info->flags;
+ action[len++] = info->context_idx;
+ action[len++] = info->engine_class;
+ action[len++] = info->engine_submit_mask;
+ action[len++] = info->wq_desc_lo;
+ action[len++] = info->wq_desc_hi;
+ action[len++] = info->wq_base_lo;
+ action[len++] = info->wq_base_hi;
+ action[len++] = info->wq_size;
action[len++] = ce->parallel.number_children + 1;
- action[len++] = offset;
+ action[len++] = info->hwlrca_lo;
+ action[len++] = info->hwlrca_hi;
+
+ next_id = info->context_idx + 1;
for_each_child(ce, child) {
- offset += sizeof(struct guc_lrc_desc);
- action[len++] = offset;
+ GEM_BUG_ON(next_id++ != child->guc_id.id);
+
+ /*
+ * NB: GuC interface supports 64 bit LRCA even though i915/HW
+ * only supports 32 bit currently.
+ */
+ action[len++] = lower_32_bits(child->lrc.lrca);
+ action[len++] = upper_32_bits(child->lrc.lrca);
}
+ GEM_BUG_ON(len > ARRAY_SIZE(action));
+
return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
static int __guc_action_register_context(struct intel_guc *guc,
- u32 guc_id,
- u32 offset,
+ struct guc_ctxt_registration_info *info,
bool loop)
{
u32 action[] = {
INTEL_GUC_ACTION_REGISTER_CONTEXT,
- guc_id,
- offset,
+ info->flags,
+ info->context_idx,
+ info->engine_class,
+ info->engine_submit_mask,
+ info->wq_desc_lo,
+ info->wq_desc_hi,
+ info->wq_base_lo,
+ info->wq_base_hi,
+ info->wq_size,
+ info->hwlrca_lo,
+ info->hwlrca_hi,
};
return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
0, loop);
}
+static void prepare_context_registration_info(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info);
+
static int register_context(struct intel_context *ce, bool loop)
{
+ struct guc_ctxt_registration_info info;
struct intel_guc *guc = ce_to_guc(ce);
- u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
- ce->guc_id.id * sizeof(struct guc_lrc_desc);
int ret;
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
+ prepare_context_registration_info(ce, &info);
+
if (intel_context_is_parent(ce))
- ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id,
- offset, loop);
+ ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
else
- ret = __guc_action_register_context(guc, ce->guc_id.id, offset,
- loop);
+ ret = __guc_action_register_context(guc, &info, loop);
if (likely(!ret)) {
unsigned long flags;
spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ guc_context_policy_init(ce, loop);
}
return ret;
@@ -2202,33 +2288,120 @@ static inline u32 get_children_join_value(struct intel_context *ce,
return __get_parent_scratch(ce)->join[child_index].semaphore;
}
-static void guc_context_policy_init(struct intel_engine_cs *engine,
- struct guc_lrc_desc *desc)
+struct context_policy {
+ u32 count;
+ struct guc_update_context_policy h2g;
+};
+
+static u32 __guc_context_policy_action_size(struct context_policy *policy)
+{
+ size_t bytes = sizeof(policy->h2g.header) +
+ (sizeof(policy->h2g.klv[0]) * policy->count);
+
+ return bytes / sizeof(u32);
+}
+
+static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id)
{
- desc->policy_flags = 0;
+ policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
+ policy->h2g.header.ctx_id = guc_id;
+ policy->count = 0;
+}
- if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
- desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE;
+#define MAKE_CONTEXT_POLICY_ADD(func, id) \
+static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \
+{ \
+ GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
+ policy->h2g.klv[policy->count].kl = \
+ FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
+ FIELD_PREP(GUC_KLV_0_LEN, 1); \
+ policy->h2g.klv[policy->count].value = data; \
+ policy->count++; \
+}
+
+MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
+MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
+MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
+MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
+
+#undef MAKE_CONTEXT_POLICY_ADD
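/*
 * For reference, the expansion MAKE_CONTEXT_POLICY_ADD(priority,
 * SCHEDULING_PRIORITY) generates -- shown expanded here with a suffixed
 * name to avoid clashing with the generated helper:
 */
static void __guc_context_policy_add_priority_expanded(struct context_policy *policy,
						       u32 data)
{
	GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS);
	policy->h2g.klv[policy->count].kl =
		FIELD_PREP(GUC_KLV_0_KEY,
			   GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY) |
		FIELD_PREP(GUC_KLV_0_LEN, 1); /* one value dword follows the key */
	policy->h2g.klv[policy->count].value = data;
	policy->count++;
}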
+
+static int __guc_context_set_context_policies(struct intel_guc *guc,
+ struct context_policy *policy,
+ bool loop)
+{
+ return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g,
+ __guc_context_policy_action_size(policy),
+ 0, loop);
+}
+
+static int guc_context_policy_init(struct intel_context *ce, bool loop)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct context_policy policy;
+ u32 execution_quantum;
+ u32 preemption_timeout;
+ bool missing = false;
+ unsigned long flags;
+ int ret;
/* NB: For both of these, zero means disabled. */
- desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
- desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ __guc_context_policy_add_preempt_to_idle(&policy, 1);
+
+ ret = __guc_context_set_context_policies(guc, &policy, loop);
+ missing = ret != 0;
+
+ if (!missing && intel_context_is_parent(ce)) {
+ struct intel_context *child;
+
+ for_each_child(ce, child) {
+ __guc_context_policy_start_klv(&policy, child->guc_id.id);
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ __guc_context_policy_add_preempt_to_idle(&policy, 1);
+
+ child->guc_state.prio = ce->guc_state.prio;
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+
+ ret = __guc_context_set_context_policies(guc, &policy, loop);
+ if (ret) {
+ missing = true;
+ break;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ if (missing)
+ set_context_policy_required(ce);
+ else
+ clr_context_policy_required(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ return ret;
}
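/*
 * Note the failure handling above: if any KLV update fails to send,
 * set_context_policy_required() flags the context so the policies are
 * re-sent later, and the flag is cleared again only once the parent and
 * every child have been updated successfully.
 */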
-static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
+static void prepare_context_registration_info(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 desc_idx = ce->guc_id.id;
- struct guc_lrc_desc *desc;
- bool context_registered;
- intel_wakeref_t wakeref;
- struct intel_context *child;
- int ret = 0;
+ u32 ctx_id = ce->guc_id.id;
GEM_BUG_ON(!engine->mask);
- GEM_BUG_ON(!sched_state_is_init(ce));
/*
* Ensure LRC + CT vmas are in the same region, as the write barrier is done
@@ -2237,55 +2410,63 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
i915_gem_object_is_lmem(ce->ring->vma->obj));
- context_registered = lrc_desc_registered(guc, desc_idx);
-
- reset_lrc_desc(guc, desc_idx);
- set_lrc_desc_registered(guc, desc_idx, ce);
-
- desc = __get_lrc_desc(guc, desc_idx);
- desc->engine_class = engine_class_to_guc_class(engine->class);
- desc->engine_submit_mask = engine->logical_mask;
- desc->hw_context_desc = ce->lrc.lrca;
- desc->priority = ce->guc_state.prio;
- desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
- guc_context_policy_init(engine, desc);
+ memset(info, 0, sizeof(*info));
+ info->context_idx = ctx_id;
+ info->engine_class = engine_class_to_guc_class(engine->class);
+ info->engine_submit_mask = engine->logical_mask;
+ /*
+ * NB: GuC interface supports 64 bit LRCA even though i915/HW
+ * only supports 32 bit currently.
+ */
+ info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
+ info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+ info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
/*
* If context is a parent, we need to register a process descriptor
* describing a work queue and register all child contexts.
*/
if (intel_context_is_parent(ce)) {
- struct guc_process_desc *pdesc;
+ struct guc_sched_wq_desc *wq_desc;
+ u64 wq_desc_offset, wq_base_offset;
ce->parallel.guc.wqi_tail = 0;
ce->parallel.guc.wqi_head = 0;
- desc->process_desc = i915_ggtt_offset(ce->state) +
- __get_parent_scratch_offset(ce);
- desc->wq_addr = i915_ggtt_offset(ce->state) +
- __get_wq_offset(ce);
- desc->wq_size = WQ_SIZE;
-
- pdesc = __get_process_desc(ce);
- memset(pdesc, 0, sizeof(*(pdesc)));
- pdesc->stage_id = ce->guc_id.id;
- pdesc->wq_base_addr = desc->wq_addr;
- pdesc->wq_size_bytes = desc->wq_size;
- pdesc->wq_status = WQ_STATUS_ACTIVE;
+ wq_desc_offset = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ wq_base_offset = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ info->wq_desc_lo = lower_32_bits(wq_desc_offset);
+ info->wq_desc_hi = upper_32_bits(wq_desc_offset);
+ info->wq_base_lo = lower_32_bits(wq_base_offset);
+ info->wq_base_hi = upper_32_bits(wq_base_offset);
+ info->wq_size = WQ_SIZE;
- for_each_child(ce, child) {
- desc = __get_lrc_desc(guc, child->guc_id.id);
-
- desc->engine_class =
- engine_class_to_guc_class(engine->class);
- desc->hw_context_desc = child->lrc.lrca;
- desc->priority = ce->guc_state.prio;
- desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
- guc_context_policy_init(engine, desc);
- }
+ wq_desc = __get_wq_desc(ce);
+ memset(wq_desc, 0, sizeof(*wq_desc));
+ wq_desc->wq_status = WQ_STATUS_ACTIVE;
clear_children_join_go_memory(ce);
}
+}
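/*
 * The lo/hi pairs filled in above are plain 64-bit splits of GGTT
 * offsets; a minimal sketch of the pattern:
 */
static void sketch_fill_64bit_field(u64 offset, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(offset); /* offset & 0xffffffff */
	*hi = upper_32_bits(offset); /* offset >> 32 */
}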
+
+static int try_context_registration(struct intel_context *ce, bool loop)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ intel_wakeref_t wakeref;
+ u32 ctx_id = ce->guc_id.id;
+ bool context_registered;
+ int ret = 0;
+
+ GEM_BUG_ON(!sched_state_is_init(ce));
+
+ context_registered = ctx_id_mapped(guc, ctx_id);
+
+ clr_ctx_id_mapping(guc, ctx_id);
+ set_ctx_id_mapping(guc, ctx_id, ce);
/*
* The context_lookup xarray is used to determine if the hardware
@@ -2311,7 +2492,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (unlikely(disabled)) {
- reset_lrc_desc(guc, desc_idx);
+ clr_ctx_id_mapping(guc, ctx_id);
return 0; /* Will get registered later */
}
@@ -2327,9 +2508,9 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
with_intel_runtime_pm(runtime_pm, wakeref)
ret = register_context(ce, loop);
if (unlikely(ret == -EBUSY)) {
- reset_lrc_desc(guc, desc_idx);
+ clr_ctx_id_mapping(guc, ctx_id);
} else if (unlikely(ret == -ENODEV)) {
- reset_lrc_desc(guc, desc_idx);
+ clr_ctx_id_mapping(guc, ctx_id);
ret = 0; /* Will get registered later */
}
}
@@ -2419,7 +2600,7 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
GUC_CONTEXT_DISABLE
};
- GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
+ GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID);
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_sched_disable(ce);
@@ -2516,7 +2697,7 @@ static bool context_cant_unblock(struct intel_context *ce)
return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
context_guc_id_invalid(ce) ||
- !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
+ !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
!intel_context_is_pinned(ce);
}
@@ -2580,13 +2761,11 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- u32 action[] = {
- INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
- guc_id,
- preemption_timeout
- };
+ struct context_policy policy;
- intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ __guc_context_policy_start_klv(&policy, guc_id);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_set_context_policies(guc, &policy, true);
}
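/*
 * The dedicated SET_CONTEXT_PREEMPTION_TIMEOUT H2G action is gone: a
 * single-entry KLV update now carries the new timeout, so every
 * per-context tunable (priority, quantum, timeout, preempt-to-idle)
 * flows through the one UPDATE_CONTEXT_POLICIES action.
 */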
static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
@@ -2686,7 +2865,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
bool disabled;
GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
- GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
+ GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
GEM_BUG_ON(context_enabled(ce));
@@ -2803,7 +2982,7 @@ static void guc_context_destroy(struct kref *kref)
*/
spin_lock_irqsave(&guc->submission_state.lock, flags);
destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
- !lrc_desc_registered(guc, ce->guc_id.id);
+ !ctx_id_mapped(guc, ce->guc_id.id);
if (likely(!destroy)) {
if (!list_empty(&ce->guc_id.link))
list_del_init(&ce->guc_id.link);
@@ -2831,16 +3010,20 @@ static int guc_context_alloc(struct intel_context *ce)
return lrc_alloc(ce, ce->engine);
}
+static void __guc_context_set_prio(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ struct context_policy policy;
+
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_set_context_policies(guc, &policy, true);
+}
+
static void guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce,
u8 prio)
{
- u32 action[] = {
- INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
- ce->guc_id.id,
- prio,
- };
-
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
prio > GUC_CLIENT_PRIORITY_NORMAL);
lockdep_assert_held(&ce->guc_state.lock);
@@ -2851,9 +3034,9 @@ static void guc_context_set_prio(struct intel_guc *guc,
return;
}
- guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
-
ce->guc_state.prio = prio;
+ __guc_context_set_prio(guc, ce);
+
trace_intel_context_set_prio(ce);
}
@@ -3046,7 +3229,7 @@ static void guc_signal_context_fence(struct intel_context *ce)
static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
{
return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
- !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
+ !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
!submission_disabled(ce_to_guc(ce));
}
@@ -3123,7 +3306,7 @@ static int guc_request_alloc(struct i915_request *rq)
if (unlikely(ret < 0))
return ret;
if (context_needs_register(ce, !!ret)) {
- ret = guc_lrc_desc_pin(ce, true);
+ ret = try_context_registration(ce, true);
if (unlikely(ret)) { /* unwind */
if (ret == -EPIPE) {
disable_submission(guc);
@@ -3560,7 +3743,7 @@ static void guc_sanitize(struct intel_engine_cs *engine)
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
intel_engine_reset_pinned_contexts(engine);
}
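/*
 * drm_clflush_virt_range(), declared in <drm/drm_cache.h>, replaces the
 * x86-only clflush_cache_range() here: the DRM helper builds on every
 * architecture and picks an appropriate flush mechanism rather than
 * assuming CLFLUSH exists.
 */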
@@ -3595,7 +3778,7 @@ static int guc_resume(struct intel_engine_cs *engine)
setup_hwsp(engine);
start_engine(engine);
- if (engine->class == RENDER_CLASS)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
xehp_enable_ccs_engines(engine);
return 0;
@@ -3614,9 +3797,17 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
static inline void guc_kernel_context_pin(struct intel_guc *guc,
struct intel_context *ce)
{
+ /*
+ * Note: we purposefully do not check the returns below because
+ * the registration can only fail if a reset is just starting.
+ * This is called at the end of reset so presumably another reset
+ * isn't happening and even if it did, this code would be run again.
+ */
+
if (context_guc_id_invalid(ce))
pin_guc_id(guc, ce);
- guc_lrc_desc_pin(ce, true);
+
+ try_context_registration(ce, true);
}
static inline void guc_init_lrc_mapping(struct intel_guc *guc)
@@ -3634,13 +3825,7 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
* Also, after a reset of the GuC we want to make sure that the
* information shared with GuC is properly reset. The kernel LRCs are
* not attached to the gem_context, so they need to be added separately.
- *
- * Note: we purposefully do not check the return of guc_lrc_desc_pin,
- * because that function can only fail if a reset is just starting. This
- * is at the end of reset so presumably another reset isn't happening
- * and even it did this code would be run again.
*/
-
for_each_engine(engine, gt, id) {
struct intel_context *ce;
@@ -3680,7 +3865,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->sched_engine->schedule = i915_schedule;
- engine->reset.prepare = guc_reset_nop;
+ engine->reset.prepare = guc_engine_reset_prepare;
engine->reset.rewind = guc_rewind_nop;
engine->reset.cancel = guc_reset_nop;
engine->reset.finish = guc_reset_nop;
@@ -3699,6 +3884,10 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ /* Wa_14014475959:dg2 */
+ if (IS_DG2(engine->i915) && engine->class == COMPUTE_CLASS)
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
* handled by the firmware so some minor tweaks are required before
@@ -3708,6 +3897,8 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
*/
engine->emit_bb_start = gen8_emit_bb_start;
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ engine->emit_bb_start = gen125_emit_bb_start;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -3835,32 +4026,32 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
- guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS;
+ guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
guc->submission_supported = __guc_submission_supported(guc);
guc->submission_selected = __guc_submission_selected(guc);
}
static inline struct intel_context *
-g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
+g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
{
struct intel_context *ce;
- if (unlikely(desc_idx >= GUC_MAX_LRC_DESCRIPTORS)) {
+ if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Invalid desc_idx %u", desc_idx);
+ "Invalid ctx_id %u\n", ctx_id);
return NULL;
}
- ce = __get_context(guc, desc_idx);
+ ce = __get_context(guc, ctx_id);
if (unlikely(!ce)) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Context is NULL, desc_idx %u", desc_idx);
+ "Context is NULL, ctx_id %u\n", ctx_id);
return NULL;
}
if (unlikely(intel_context_is_child(ce))) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Context is child, desc_idx %u", desc_idx);
+ "Context is child, ctx_id %u\n", ctx_id);
return NULL;
}
@@ -3872,14 +4063,15 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
u32 len)
{
struct intel_context *ce;
- u32 desc_idx = msg[0];
+ u32 ctx_id;
if (unlikely(len < 1)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
return -EPROTO;
}
+ ctx_id = msg[0];
- ce = g2h_context_lookup(guc, desc_idx);
+ ce = g2h_context_lookup(guc, ctx_id);
if (unlikely(!ce))
return -EPROTO;
@@ -3923,14 +4115,15 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
{
struct intel_context *ce;
unsigned long flags;
- u32 desc_idx = msg[0];
+ u32 ctx_id;
if (unlikely(len < 2)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
return -EPROTO;
}
+ ctx_id = msg[0];
- ce = g2h_context_lookup(guc, desc_idx);
+ ce = g2h_context_lookup(guc, ctx_id);
if (unlikely(!ce))
return -EPROTO;
@@ -3938,8 +4131,8 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
(!context_pending_enable(ce) &&
!context_pending_disable(ce)))) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Bad context sched_state 0x%x, desc_idx %u",
- ce->guc_state.sched_state, desc_idx);
+ "Bad context sched_state 0x%x, ctx_id %u\n",
+ ce->guc_state.sched_state, ctx_id);
return -EPROTO;
}
@@ -4005,7 +4198,7 @@ static void capture_error_state(struct intel_guc *guc,
intel_engine_set_hung_context(engine, ce);
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- i915_capture_error_state(gt, engine->mask);
+ i915_capture_error_state(gt, engine->mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
}
@@ -4037,14 +4230,14 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
{
struct intel_context *ce;
unsigned long flags;
- int desc_idx;
+ int ctx_id;
if (unlikely(len != 1)) {
drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
return -EPROTO;
}
- desc_idx = msg[0];
+ ctx_id = msg[0];
/*
* The context lookup uses the xarray but lookups only require an RCU lock
@@ -4053,7 +4246,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
* asynchronously until the reset is done.
*/
xa_lock_irqsave(&guc->context_lookup, flags);
- ce = g2h_context_lookup(guc, desc_idx);
+ ce = g2h_context_lookup(guc, ctx_id);
if (ce)
intel_context_get(ce);
xa_unlock_irqrestore(&guc->context_lookup, flags);
@@ -4070,23 +4263,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
- int status;
+ u32 status;
if (unlikely(len != 1)) {
drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
return -EPROTO;
}
- status = msg[0];
- drm_info(&guc_to_gt(guc)->i915->drm, "Got error capture: status = %d", status);
+ status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
+ if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
+ drm_warn(&guc_to_gt(guc)->i915->drm, "G2H-Error capture no space");
- /* FIXME: Do something with the capture */
+ intel_guc_capture_process(guc);
return 0;
}
-static struct intel_engine_cs *
-guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
+struct intel_engine_cs *
+intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
{
struct intel_gt *gt = guc_to_gt(guc);
u8 engine_class = guc_class_to_engine_class(guc_class);
@@ -4135,7 +4329,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
instance = msg[1];
reason = msg[2];
- engine = guc_lookup_engine(guc, guc_class, instance);
+ engine = intel_guc_lookup_engine(guc, guc_class, instance);
if (unlikely(!engine)) {
drm_err(&gt->i915->drm,
"Invalid engine %d:%d", guc_class, instance);
@@ -4333,17 +4527,17 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
guc_log_context_priority(p, ce);
if (intel_context_is_parent(ce)) {
- struct guc_process_desc *desc = __get_process_desc(ce);
+ struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
struct intel_context *child;
drm_printf(p, "\t\tNumber children: %u\n",
ce->parallel.number_children);
drm_printf(p, "\t\tWQI Head: %u\n",
- READ_ONCE(desc->head));
+ READ_ONCE(wq_desc->head));
drm_printf(p, "\t\tWQI Tail: %u\n",
- READ_ONCE(desc->tail));
+ READ_ONCE(wq_desc->tail));
drm_printf(p, "\t\tWQI Status: %u\n\n",
- READ_ONCE(desc->wq_status));
+ READ_ONCE(wq_desc->wq_status));
if (ce->engine->emit_bb_start ==
emit_bb_start_parent_no_preempt_mid_batch) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 8eb34de2f20c..e8f099360e01 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -3,6 +3,8 @@
* Copyright © 2016-2019 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "intel_guc.h"
@@ -78,10 +80,10 @@ static void __confirm_options(struct intel_uc *uc)
drm_dbg(&i915->drm,
"enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
i915->params.enable_guc,
- yesno(intel_uc_wants_guc(uc)),
- yesno(intel_uc_wants_guc_submission(uc)),
- yesno(intel_uc_wants_huc(uc)),
- yesno(intel_uc_wants_guc_slpc(uc)));
+ str_yes_no(intel_uc_wants_guc(uc)),
+ str_yes_no(intel_uc_wants_guc_submission(uc)),
+ str_yes_no(intel_uc_wants_huc(uc)),
+ str_yes_no(intel_uc_wants_guc_slpc(uc)));
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
@@ -522,9 +524,9 @@ static int __uc_init_hw(struct intel_uc *uc)
}
drm_info(&i915->drm, "GuC submission %s\n",
- enableddisabled(intel_uc_uses_guc_submission(uc)));
+ str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
drm_info(&i915->drm, "GuC SLPC %s\n",
- enableddisabled(intel_uc_uses_guc_slpc(uc)));
+ str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
return 0;
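/*
 * The helpers used above come from <linux/string_helpers.h> and map a
 * bool straight to a string, replacing the drm-local yesno() and
 * enableddisabled():
 *
 *	str_yes_no(true)            returns "yes"
 *	str_enabled_disabled(false) returns "disabled"
 */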
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
index c2f7924295e7..284d6fbc2d08 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -4,6 +4,8 @@
*/
#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
@@ -18,17 +20,17 @@ static int uc_usage_show(struct seq_file *m, void *data)
struct drm_printer p = drm_seq_file_printer(m);
drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
- yesno(intel_uc_supports_guc(uc)),
- yesno(intel_uc_wants_guc(uc)),
- yesno(intel_uc_uses_guc(uc)));
+ str_yes_no(intel_uc_supports_guc(uc)),
+ str_yes_no(intel_uc_wants_guc(uc)),
+ str_yes_no(intel_uc_uses_guc(uc)));
drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
- yesno(intel_uc_supports_huc(uc)),
- yesno(intel_uc_wants_huc(uc)),
- yesno(intel_uc_uses_huc(uc)));
+ str_yes_no(intel_uc_supports_huc(uc)),
+ str_yes_no(intel_uc_wants_huc(uc)),
+ str_yes_no(intel_uc_uses_huc(uc)));
drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
- yesno(intel_uc_supports_guc_submission(uc)),
- yesno(intel_uc_wants_guc_submission(uc)),
- yesno(intel_uc_uses_guc_submission(uc)));
+ str_yes_no(intel_uc_supports_guc_submission(uc)),
+ str_yes_no(intel_uc_wants_guc_submission(uc)),
+ str_yes_no(intel_uc_uses_guc_submission(uc)));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index c88113044494..d078f884b5e3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/firmware.h>
+#include <linux/highmem.h>
#include <drm/drm_cache.h>
#include <drm/drm_print.h>
@@ -52,21 +53,22 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* firmware as TGL.
*/
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3)) \
- fw_def(DG1, 0, guc_def(dg1, 69, 0, 3)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 69, 0, 3)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 69, 0, 3)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 69, 0, 3)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 69, 0, 3)) \
- fw_def(ICELAKE, 0, guc_def(icl, 69, 0, 3)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 69, 0, 3)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 69, 0, 3)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 69, 0, 3)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 69, 0, 3)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 69, 0, 3)) \
- fw_def(BROXTON, 0, guc_def(bxt, 69, 0, 3)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 69, 0, 3))
+ fw_def(DG2, 0, guc_def(dg2, 70, 1, 2)) \
+ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 70, 1, 1)) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 70, 1, 1)) \
+ fw_def(DG1, 0, guc_def(dg1, 70, 1, 1)) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 70, 1, 1)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 70, 1, 1)) \
+ fw_def(JASPERLAKE, 0, guc_def(ehl, 70, 1, 1)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 70, 1, 1)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 70, 1, 1)) \
+ fw_def(COMETLAKE, 5, guc_def(cml, 70, 1, 1)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 70, 1, 1)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 70, 1, 1)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 70, 1, 1)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 70, 1, 1)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1))
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index a115894d5896..1df71d0796ae 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -148,7 +148,7 @@ static int intel_guc_steal_guc_ids(void *arg)
struct i915_request *spin_rq = NULL, *rq, *last = NULL;
int number_guc_id_stolen = guc->number_guc_id_stolen;
- ce = kzalloc(sizeof(*ce) * GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL);
+ ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
pr_err("Context array allocation failed\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index ea8324abc784..1699f644298e 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,9 +1,25 @@
# SPDX-License-Identifier: GPL-2.0
-GVT_DIR := gvt
-GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
- interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
- execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
- fb_decoder.o dmabuf.o page_track.o
-ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
-i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+kvmgt-$(CONFIG_DRM_I915_GVT) += \
+ gvt/aperture_gm.o \
+ gvt/cfg_space.o \
+ gvt/cmd_parser.o \
+ gvt/debugfs.o \
+ gvt/display.o \
+ gvt/dmabuf.o \
+ gvt/edid.o \
+ gvt/execlist.o \
+ gvt/fb_decoder.o \
+ gvt/firmware.o \
+ gvt/gtt.o \
+ gvt/handlers.o \
+ gvt/interrupt.o \
+ gvt/kvmgt.o \
+ gvt/mmio.o \
+ gvt/mmio_context.o \
+ gvt/opregion.o \
+ gvt/page_track.o \
+ gvt/sched_policy.o \
+ gvt/scheduler.o \
+ gvt/trace_points.o \
+ gvt/vgpu.o
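# One object per line under a kvmgt-$(CONFIG_DRM_I915_GVT) target: the
# GVT sources now build into their own kvmgt composite object instead of
# being folded into i915-y, and the sorted one-per-line list keeps
# future diffs to this Makefile minimal.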
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b490e3db2e38..dad3a6054335 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -129,60 +129,16 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-static int map_aperture(struct intel_vgpu *vgpu, bool map)
+static void map_aperture(struct intel_vgpu *vgpu, bool map)
{
- phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
- unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
- u64 first_gfn;
- u64 val;
- int ret;
-
- if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
- return 0;
-
- val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
- if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
- val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
- else
- val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
-
- first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
-
- ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
- aperture_pa >> PAGE_SHIFT,
- aperture_sz >> PAGE_SHIFT,
- map);
- if (ret)
- return ret;
-
- vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
- return 0;
+ if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
}
-static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
+static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
- u64 start, end;
- u64 val;
- int ret;
-
- if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
- return 0;
-
- val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
- if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
- start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
- else
- start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
-
- start &= ~GENMASK(3, 0);
- end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
-
- ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
- if (ret)
- return ret;
-
- vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
- return 0;
+ if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
}
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
@@ -191,26 +147,17 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 old = vgpu_cfg_space(vgpu)[offset];
u8 new = *(u8 *)p_data;
u8 changed = old ^ new;
- int ret;
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
if (old & PCI_COMMAND_MEMORY) {
- ret = trap_gttmmio(vgpu, false);
- if (ret)
- return ret;
- ret = map_aperture(vgpu, false);
- if (ret)
- return ret;
+ trap_gttmmio(vgpu, false);
+ map_aperture(vgpu, false);
} else {
- ret = trap_gttmmio(vgpu, true);
- if (ret)
- return ret;
- ret = map_aperture(vgpu, true);
- if (ret)
- return ret;
+ trap_gttmmio(vgpu, true);
+ map_aperture(vgpu, true);
}
return 0;
@@ -230,13 +177,12 @@ static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
return 0;
}
-static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
- int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
@@ -259,14 +205,14 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
* Untrap the BAR, since guest hasn't configured a
* valid GPA
*/
- ret = trap_gttmmio(vgpu, false);
+ trap_gttmmio(vgpu, false);
break;
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
intel_vgpu_write_pci_bar(vgpu, offset,
size >> (lo ? 0 : 32), lo);
- ret = map_aperture(vgpu, false);
+ map_aperture(vgpu, false);
break;
default:
/* Unimplemented BARs */
@@ -282,19 +228,18 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
*/
trap_gttmmio(vgpu, false);
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
- ret = trap_gttmmio(vgpu, mmio_enabled);
+ trap_gttmmio(vgpu, mmio_enabled);
break;
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
map_aperture(vgpu, false);
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
- ret = map_aperture(vgpu, mmio_enabled);
+ map_aperture(vgpu, mmio_enabled);
break;
default:
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
}
}
- return ret;
}
/**
@@ -336,8 +281,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
- return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
-
+ emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+ break;
case INTEL_GVT_PCI_SWSCI:
if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 2459213b6c87..b9eb75a2b400 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (GRAPHICS_VER(s->engine->i915) == 9 &&
intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
- intel_gvt_hypervisor_read_gpa(s->vgpu,
+ intel_gvt_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
if (ctx_sr_ctl & 1) {
@@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
I915_GTT_PAGE_SIZE - offset : end_gma - gma;
- intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
+ intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len);
len += copy_len;
gma += copy_len;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index c95c25d2addb..01e54b45c5c1 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -29,7 +29,7 @@
*/
#include <linux/dma-buf.h>
-#include <linux/vfio.h>
+#include <linux/mdev.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
@@ -42,24 +42,6 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
-static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
- unsigned long size,
- dma_addr_t dma_addr)
-{
- int ret = 0;
-
- if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
- ret = -EINVAL;
-
- return ret;
-}
-
-static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
- dma_addr_t dma_addr)
-{
- intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
-}
-
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
@@ -95,7 +77,7 @@ static int vgpu_gem_get_pages(
for_each_sg(st->sgl, sg, page_num, i) {
dma_addr_t dma_addr =
GEN8_DECODE_PTE(readq(&gtt_entries[i]));
- if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+ if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
ret = -EINVAL;
goto out;
}
@@ -114,7 +96,7 @@ out:
for_each_sg(st->sgl, sg, i, j) {
dma_addr = sg_dma_address(sg);
if (dma_addr)
- vgpu_unpin_dma_address(vgpu, dma_addr);
+ intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
}
sg_free_table(st);
kfree(st);
@@ -136,7 +118,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
int i;
for_each_sg(pages->sgl, sg, fb_info->size, i)
- vgpu_unpin_dma_address(vgpu,
+ intel_gvt_dma_unmap_guest_page(vgpu,
sg_dma_address(sg));
}
@@ -157,7 +139,6 @@ static void dmabuf_gem_object_free(struct kref *kref)
dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
list_del(pos);
- intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
@@ -491,14 +472,6 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
kref_init(&dmabuf_obj->kref);
- mutex_lock(&vgpu->dmabuf_lock);
- if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
- gvt_vgpu_err("get vfio device failed\n");
- mutex_unlock(&vgpu->dmabuf_lock);
- goto out_free_info;
- }
- mutex_unlock(&vgpu->dmabuf_lock);
-
update_fb_info(gfx_plane_info, &fb_info);
INIT_LIST_HEAD(&dmabuf_obj->list);
@@ -603,7 +576,6 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
dmabuf_obj->vgpu = NULL;
idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
- intel_gvt_hypervisor_put_vfio_device(vgpu);
list_del(pos);
/* dmabuf_obj might be freed in dmabuf_obj_put */
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 66d354c4195b..274c6ef42400 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
- intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
- status, 8);
- intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
- &write_pointer, 4);
+ intel_gvt_write_gpa(vgpu,
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+ status, 8);
+ intel_gvt_write_gpa(vgpu,
+ hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
+ &write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 1a8274a3f4b1..54fe442238c6 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -66,22 +66,16 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
-static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
-{
- *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
- _MMIO(offset));
- return 0;
-}
-
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
+ struct drm_i915_private *i915 = gvt->gt->i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
- int i, ret;
+ int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
@@ -99,17 +93,16 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
p = firmware + h->cfg_space_offset;
- for (i = 0; i < h->cfg_space_size; i += 4)
- pci_read_config_dword(pdev, i, p + i);
-
- memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
+ memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space,
+ info->cfg_space_size);
+ memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size);
p = firmware + h->mmio_offset;
- /* Take a snapshot of hw mmio registers. */
- intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
+ memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio,
+ info->mmio_size);
- memcpy(gvt->firmware.mmio, p, info->mmio_size);
+ memcpy(p, gvt->firmware.mmio, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d4082f4b9be1..9c5cc2800975 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,6 +49,22 @@
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
+static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct kvm *kvm = vgpu->kvm;
+ int idx;
+ bool ret;
+
+ if (!vgpu->attached)
+ return false;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ ret = kvm_is_visible_gfn(kvm, gfn);
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return ret;
+}
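/*
 * kvm_is_visible_gfn() walks the KVM memslots, which are protected by
 * SRCU rather than a lock, hence the srcu_read_lock()/unlock() bracket
 * above; the early !vgpu->attached bail-out keeps the helper safe to
 * call before the vGPU is bound to a KVM instance.
 */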
+
/*
* validate a gm address and related range size,
* translate it to host gm address
@@ -314,7 +330,7 @@ static inline int gtt_get_entry64(void *pt,
return -EINVAL;
if (hypervisor_access) {
- ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+ ret = intel_gvt_read_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
if (WARN_ON(ret))
@@ -339,7 +355,7 @@ static inline int gtt_set_entry64(void *pt,
return -EINVAL;
if (hypervisor_access) {
- ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+ ret = intel_gvt_write_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
if (WARN_ON(ret))
@@ -997,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
- intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
+ intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
@@ -1162,15 +1178,16 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *entry)
{
const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- unsigned long pfn;
+ kvm_pfn_t pfn;
if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
- pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
- if (pfn == INTEL_GVT_INVALID_ADDR)
+ if (!vgpu->attached)
+ return -EINVAL;
+ pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry));
+ if (is_error_noslot_pfn(pfn))
return -EINVAL;
-
return PageTransHuge(pfn_to_page(pfn));
}
@@ -1195,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
return PTR_ERR(sub_spt);
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
- start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+ ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
+ PAGE_SIZE, &dma_addr);
if (ret) {
ppgtt_invalidate_spt(spt);
return ret;
@@ -1241,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_64k_splited(&entry);
for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
- start_gfn + i, PAGE_SIZE, &dma_addr);
+ ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
+ PAGE_SIZE, &dma_addr);
if (ret)
return ret;
@@ -1296,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
}
/* direct shadow */
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
- &dma_addr);
+ ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
if (ret)
return -ENXIO;
@@ -1331,7 +1347,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_set_shadow_entry(spt, &se, i);
} else {
gfn = ops->get_pfn(&ge);
- if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+ if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&se, gvt->gtt.scratch_mfn);
ppgtt_set_shadow_entry(spt, &se, i);
continue;
@@ -1497,7 +1513,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
struct intel_gvt *gvt = spt->vgpu->gvt;
int ret;
- ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
+ ret = intel_gvt_read_gpa(spt->vgpu,
spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
@@ -2228,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
pfn = pte_ops->get_pfn(entry);
if (pfn != vgpu->gvt->gtt.scratch_mfn)
- intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
- pfn << PAGE_SHIFT);
+ intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
@@ -2315,13 +2330,13 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
*/
- if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+ if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
goto out;
}
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
- PAGE_SIZE, &dma_addr);
+ ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
+ &dma_addr);
if (ret) {
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
deleted file mode 100644
index f0b69e4dcb52..000000000000
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Kevin Tian <kevin.tian@intel.com>
- * Eddie Dong <eddie.dong@intel.com>
- *
- * Contributors:
- * Niu Bing <bing.niu@intel.com>
- * Zhi Wang <zhi.a.wang@intel.com>
- *
- */
-
-#include <linux/types.h>
-#include <linux/kthread.h>
-
-#include "i915_drv.h"
-#include "intel_gvt.h"
-#include "gvt.h"
-#include <linux/vfio.h>
-#include <linux/mdev.h>
-
-struct intel_gvt_host intel_gvt_host;
-
-static const char * const supported_hypervisors[] = {
- [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
- [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
-};
-
-static const struct intel_gvt_ops intel_gvt_ops = {
- .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
- .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
- .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
- .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
- .vgpu_create = intel_gvt_create_vgpu,
- .vgpu_destroy = intel_gvt_destroy_vgpu,
- .vgpu_release = intel_gvt_release_vgpu,
- .vgpu_reset = intel_gvt_reset_vgpu,
- .vgpu_activate = intel_gvt_activate_vgpu,
- .vgpu_deactivate = intel_gvt_deactivate_vgpu,
- .vgpu_query_plane = intel_vgpu_query_plane,
- .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
- .write_protect_handler = intel_vgpu_page_track_handler,
- .emulate_hotplug = intel_vgpu_emulate_hotplug,
-};
-
-static void init_device_info(struct intel_gvt *gvt)
-{
- struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
-
- info->max_support_vgpus = 8;
- info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
- info->mmio_size = 2 * 1024 * 1024;
- info->mmio_bar = 0;
- info->gtt_start_offset = 8 * 1024 * 1024;
- info->gtt_entry_size = 8;
- info->gtt_entry_size_shift = 3;
- info->gmadr_bytes_in_cmd = 8;
- info->max_surface_size = 36 * 1024 * 1024;
- info->msi_cap_offset = pdev->msi_cap;
-}
-
-static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
-{
- struct intel_vgpu *vgpu;
- int id;
-
- mutex_lock(&gvt->lock);
- idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
- if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
- (void *)&gvt->service_request)) {
- if (vgpu->active)
- intel_vgpu_emulate_vblank(vgpu);
- }
- }
- mutex_unlock(&gvt->lock);
-}
-
-static int gvt_service_thread(void *data)
-{
- struct intel_gvt *gvt = (struct intel_gvt *)data;
- int ret;
-
- gvt_dbg_core("service thread start\n");
-
- while (!kthread_should_stop()) {
- ret = wait_event_interruptible(gvt->service_thread_wq,
- kthread_should_stop() || gvt->service_request);
-
- if (kthread_should_stop())
- break;
-
- if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
- continue;
-
- intel_gvt_test_and_emulate_vblank(gvt);
-
- if (test_bit(INTEL_GVT_REQUEST_SCHED,
- (void *)&gvt->service_request) ||
- test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
- (void *)&gvt->service_request)) {
- intel_gvt_schedule(gvt);
- }
- }
-
- return 0;
-}
-
-static void clean_service_thread(struct intel_gvt *gvt)
-{
- kthread_stop(gvt->service_thread);
-}
-
-static int init_service_thread(struct intel_gvt *gvt)
-{
- init_waitqueue_head(&gvt->service_thread_wq);
-
- gvt->service_thread = kthread_run(gvt_service_thread,
- gvt, "gvt_service_thread");
- if (IS_ERR(gvt->service_thread)) {
- gvt_err("fail to start service thread.\n");
- return PTR_ERR(gvt->service_thread);
- }
- return 0;
-}
-
-/**
- * intel_gvt_clean_device - clean a GVT device
- * @i915: i915 private
- *
- * This function is called at the driver unloading stage, to free the
- * resources owned by a GVT device.
- *
- */
-void intel_gvt_clean_device(struct drm_i915_private *i915)
-{
- struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
-
- if (drm_WARN_ON(&i915->drm, !gvt))
- return;
-
- intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
- intel_gvt_clean_vgpu_types(gvt);
-
- intel_gvt_debugfs_clean(gvt);
- clean_service_thread(gvt);
- intel_gvt_clean_cmd_parser(gvt);
- intel_gvt_clean_sched_policy(gvt);
- intel_gvt_clean_workload_scheduler(gvt);
- intel_gvt_clean_gtt(gvt);
- intel_gvt_free_firmware(gvt);
- intel_gvt_clean_mmio_info(gvt);
- idr_destroy(&gvt->vgpu_idr);
-
- kfree(i915->gvt);
-}
-
-/**
- * intel_gvt_init_device - initialize a GVT device
- * @i915: drm i915 private data
- *
- * This function is called at the initialization stage, to initialize
- * necessary GVT components.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- *
- */
-int intel_gvt_init_device(struct drm_i915_private *i915)
-{
- struct intel_gvt *gvt;
- struct intel_vgpu *vgpu;
- int ret;
-
- if (drm_WARN_ON(&i915->drm, i915->gvt))
- return -EEXIST;
-
- gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
- if (!gvt)
- return -ENOMEM;
-
- gvt_dbg_core("init gvt device\n");
-
- idr_init_base(&gvt->vgpu_idr, 1);
- spin_lock_init(&gvt->scheduler.mmio_context_lock);
- mutex_init(&gvt->lock);
- mutex_init(&gvt->sched_lock);
- gvt->gt = to_gt(i915);
- i915->gvt = gvt;
-
- init_device_info(gvt);
-
- ret = intel_gvt_setup_mmio_info(gvt);
- if (ret)
- goto out_clean_idr;
-
- intel_gvt_init_engine_mmio_context(gvt);
-
- ret = intel_gvt_load_firmware(gvt);
- if (ret)
- goto out_clean_mmio_info;
-
- ret = intel_gvt_init_irq(gvt);
- if (ret)
- goto out_free_firmware;
-
- ret = intel_gvt_init_gtt(gvt);
- if (ret)
- goto out_free_firmware;
-
- ret = intel_gvt_init_workload_scheduler(gvt);
- if (ret)
- goto out_clean_gtt;
-
- ret = intel_gvt_init_sched_policy(gvt);
- if (ret)
- goto out_clean_workload_scheduler;
-
- ret = intel_gvt_init_cmd_parser(gvt);
- if (ret)
- goto out_clean_sched_policy;
-
- ret = init_service_thread(gvt);
- if (ret)
- goto out_clean_cmd_parser;
-
- ret = intel_gvt_init_vgpu_types(gvt);
- if (ret)
- goto out_clean_thread;
-
- vgpu = intel_gvt_create_idle_vgpu(gvt);
- if (IS_ERR(vgpu)) {
- ret = PTR_ERR(vgpu);
- gvt_err("failed to create idle vgpu\n");
- goto out_clean_types;
- }
- gvt->idle_vgpu = vgpu;
-
- intel_gvt_debugfs_init(gvt);
-
- gvt_dbg_core("gvt device initialization is done\n");
- intel_gvt_host.dev = i915->drm.dev;
- intel_gvt_host.initialized = true;
- return 0;
-
-out_clean_types:
- intel_gvt_clean_vgpu_types(gvt);
-out_clean_thread:
- clean_service_thread(gvt);
-out_clean_cmd_parser:
- intel_gvt_clean_cmd_parser(gvt);
-out_clean_sched_policy:
- intel_gvt_clean_sched_policy(gvt);
-out_clean_workload_scheduler:
- intel_gvt_clean_workload_scheduler(gvt);
-out_clean_gtt:
- intel_gvt_clean_gtt(gvt);
-out_free_firmware:
- intel_gvt_free_firmware(gvt);
-out_clean_mmio_info:
- intel_gvt_clean_mmio_info(gvt);
-out_clean_idr:
- idr_destroy(&gvt->vgpu_idr);
- kfree(gvt);
- i915->gvt = NULL;
- return ret;
-}
-
-int
-intel_gvt_pm_resume(struct intel_gvt *gvt)
-{
- intel_gvt_restore_fence(gvt);
- intel_gvt_restore_mmio(gvt);
- intel_gvt_restore_ggtt(gvt);
- return 0;
-}
-
-int
-intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
-{
- int ret;
- void *gvt;
-
- if (!intel_gvt_host.initialized)
- return -ENODEV;
-
- if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
- m->type != INTEL_GVT_HYPERVISOR_XEN)
- return -EINVAL;
-
- /* Get a reference for device model module */
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- intel_gvt_host.mpt = m;
- intel_gvt_host.hypervisor_type = m->type;
- gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
-
- ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
- &intel_gvt_ops);
- if (ret < 0) {
- gvt_err("Failed to init %s hypervisor module\n",
- supported_hypervisors[intel_gvt_host.hypervisor_type]);
- module_put(THIS_MODULE);
- return -ENODEV;
- }
- gvt_dbg_core("Running with hypervisor %s in host mode\n",
- supported_hypervisors[intel_gvt_host.hypervisor_type]);
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
-
-void
-intel_gvt_unregister_hypervisor(void)
-{
- void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
- intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
- module_put(THIS_MODULE);
-}
-EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0ebffc327528..03ecffc2ba56 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -34,11 +34,13 @@
#define _GVT_H_
#include <uapi/linux/pci_regs.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "debug.h"
-#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
@@ -56,15 +58,6 @@
#define GVT_MAX_VGPU 8
-struct intel_gvt_host {
- struct device *dev;
- bool initialized;
- int hypervisor_type;
- const struct intel_gvt_mpt *mpt;
-};
-
-extern struct intel_gvt_host intel_gvt_host;
-
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
@@ -176,12 +169,14 @@ struct intel_vgpu_submission {
} last_ctx[I915_NUM_ENGINES];
};
+#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
+
struct intel_vgpu {
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
- unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
+ bool attached;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
@@ -209,21 +204,40 @@ struct intel_vgpu {
struct dentry *debugfs;
- /* Hypervisor-specific device state. */
- void *vdev;
-
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
struct intel_vgpu_vblank_timer vblank_timer;
u32 scan_nonprivbb;
-};
-static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
-{
- return vgpu->vdev;
-}
+ struct vfio_device vfio_device;
+ struct vfio_region *region;
+ int num_regions;
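+ /* eventfd contexts used to inject INTx/MSI interrupts into the guest */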
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+
+ /*
+ * Two caches are used to avoid mapping duplicated pages (e.g.
+ * scratch pages). This helps to reduce the DMA setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
+ struct mutex cache_lock;
+
+ struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
+ struct vfio_group *vfio_group;
+
+ struct kvm_page_track_notifier_node track_node;
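+ /* hash table of write-protected guest pages, keyed by gfn */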
+#define NR_BKT (1 << 18)
+ struct hlist_head ptable[NR_BKT];
+#undef NR_BKT
+};
/* validating GM healthy status */
#define vgpu_is_vm_unhealthy(ret_val) \
@@ -272,7 +286,7 @@ struct intel_gvt_mmio {
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH (1 << 8)
- const struct gvt_mmio_block *mmio_block;
+ struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
@@ -428,7 +442,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define RING_CTX_SIZE 320
struct intel_vgpu_creation_params {
- __u64 handle;
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
@@ -496,6 +509,9 @@ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
+int intel_gvt_set_opregion(struct intel_vgpu *vgpu);
+int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);
+
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
@@ -557,30 +573,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
-
-struct intel_gvt_ops {
- int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
- unsigned int);
- int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
- unsigned int);
- int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
- unsigned int);
- int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
- unsigned int);
- struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
- struct intel_vgpu_type *);
- void (*vgpu_destroy)(struct intel_vgpu *vgpu);
- void (*vgpu_release)(struct intel_vgpu *vgpu);
- void (*vgpu_reset)(struct intel_vgpu *);
- void (*vgpu_activate)(struct intel_vgpu *);
- void (*vgpu_deactivate)(struct intel_vgpu *);
- int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
- int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
- int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
- unsigned int);
- void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
-};
-
+void intel_vgpu_detach_regions(struct intel_vgpu *vgpu);
enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST,
@@ -724,13 +717,54 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}
+/**
+ * intel_gvt_read_gpa - copy data from GPA to host data buffer
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ if (!vgpu->attached)
+ return -ESRCH;
+ return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
+}
+
+/**
+ * intel_gvt_write_gpa - copy data from host data buffer to GPA
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ if (!vgpu->attached)
+ return -ESRCH;
+ return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
+}
+
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
-int intel_gvt_pm_resume(struct intel_gvt *gvt);
+int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
+int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
+int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
+int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size, dma_addr_t *dma_addr);
+void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr);
#include "trace.h"
-#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 520a7e1942f3..beea5895e499 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -42,6 +42,7 @@
#include "i915_pvinfo.h"
#include "intel_mchbar_regs.h"
#include "display/intel_display_types.h"
+#include "display/intel_dmc_regs.h"
#include "display/intel_fbc.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
@@ -71,7 +72,7 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return 0;
}
-bool intel_gvt_match_device(struct intel_gvt *gvt,
+static bool intel_gvt_match_device(struct intel_gvt *gvt,
unsigned long device)
{
return intel_gvt_get_device_type(gvt) & device;
@@ -101,12 +102,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
return NULL;
}
-static int new_mmio_info(struct intel_gvt *gvt,
- u32 offset, u16 flags, u32 size,
- u32 addr_mask, u32 ro_mask, u32 device,
- gvt_mmio_func read, gvt_mmio_func write)
+static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
+ u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
+ gvt_mmio_func read, gvt_mmio_func write)
{
- struct intel_gvt_mmio_info *info, *p;
+ struct intel_gvt_mmio_info *p;
u32 start, end, i;
if (!intel_gvt_match_device(gvt, device))
@@ -119,32 +119,18 @@ static int new_mmio_info(struct intel_gvt *gvt,
end = offset + size;
for (i = start; i < end; i += 4) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->offset = i;
- p = intel_gvt_find_mmio_info(gvt, info->offset);
- if (p) {
- WARN(1, "dup mmio definition offset %x\n",
- info->offset);
- kfree(info);
-
- /* We return -EEXIST here to make GVT-g load fail.
- * So duplicated MMIO can be found as soon as
- * possible.
- */
- return -EEXIST;
+ p = intel_gvt_find_mmio_info(gvt, i);
+ if (!p) {
+ WARN(1, "assign a handler to a non-tracked mmio %x\n",
+ i);
+ return -ENODEV;
}
-
- info->ro_mask = ro_mask;
- info->device = device;
- info->read = read ? read : intel_vgpu_default_mmio_read;
- info->write = write ? write : intel_vgpu_default_mmio_write;
- gvt->mmio.mmio_attribute[info->offset / 4] = flags;
- INIT_HLIST_NODE(&info->node);
- hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
- gvt->mmio.num_tracked_mmio++;
+ p->ro_mask = ro_mask;
+ gvt->mmio.mmio_attribute[i / 4] = flags;
+ if (read)
+ p->read = read;
+ if (write)
+ p->write = write;
}
return 0;
}
@@ -576,14 +562,19 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
}
clock.m1 = 2;
- clock.m2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0)) & PORT_PLL_M2_MASK) << 22;
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)) & PORT_PLL_M2_FRAC_MASK;
- clock.n = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)) & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
+ vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
clock.m = clock.m1 * clock.m2;
- clock.p = clock.p1 * clock.p2;
+ clock.p = clock.p1 * clock.p2 * 5;
if (clock.n == 0 || clock.p == 0) {
gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
@@ -593,7 +584,7 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);
- dp_br = clock.dot / 5;
+ dp_br = clock.dot;
out:
return dp_br;
@@ -2137,15 +2128,12 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
}
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
- ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
- f, s, am, rm, d, r, w); \
+ ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
+ s, f, am, rm, d, r, w); \
if (ret) \
return ret; \
} while (0)
-#define MMIO_D(reg, d) \
- MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
-
#define MMIO_DH(reg, d, r, w) \
MMIO_F(reg, 4, 0, 0, 0, d, r, w)
@@ -2170,9 +2158,6 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
-#define MMIO_RING_D(prefix, d) \
- MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
-
#define MMIO_RING_DFH(prefix, d, f, r, w) \
MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
@@ -2196,7 +2181,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(SDEISR, D_ALL);
MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
@@ -2224,7 +2208,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
- MMIO_D(GEN7_CXT_SIZE, D_ALL);
MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
@@ -2278,257 +2261,32 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(_MMIO(0x602a0), D_ALL);
-
- MMIO_D(_MMIO(0x65050), D_ALL);
- MMIO_D(_MMIO(0x650b4), D_ALL);
-
- MMIO_D(_MMIO(0xc4040), D_ALL);
- MMIO_D(DERRMR, D_ALL);
-
- MMIO_D(PIPEDSL(PIPE_A), D_ALL);
- MMIO_D(PIPEDSL(PIPE_B), D_ALL);
- MMIO_D(PIPEDSL(PIPE_C), D_ALL);
- MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
-
MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
-
- MMIO_D(PIPESTAT(PIPE_A), D_ALL);
- MMIO_D(PIPESTAT(PIPE_B), D_ALL);
- MMIO_D(PIPESTAT(PIPE_C), D_ALL);
- MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
-
- MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
- MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
- MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
- MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
-
- MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
- MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
- MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
- MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
-
- MMIO_D(CURCNTR(PIPE_A), D_ALL);
- MMIO_D(CURCNTR(PIPE_B), D_ALL);
- MMIO_D(CURCNTR(PIPE_C), D_ALL);
-
- MMIO_D(CURPOS(PIPE_A), D_ALL);
- MMIO_D(CURPOS(PIPE_B), D_ALL);
- MMIO_D(CURPOS(PIPE_C), D_ALL);
-
- MMIO_D(CURBASE(PIPE_A), D_ALL);
- MMIO_D(CURBASE(PIPE_B), D_ALL);
- MMIO_D(CURBASE(PIPE_C), D_ALL);
-
- MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
- MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
- MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
-
- MMIO_D(_MMIO(0x700ac), D_ALL);
- MMIO_D(_MMIO(0x710ac), D_ALL);
- MMIO_D(_MMIO(0x720ac), D_ALL);
-
- MMIO_D(_MMIO(0x70090), D_ALL);
- MMIO_D(_MMIO(0x70094), D_ALL);
- MMIO_D(_MMIO(0x70098), D_ALL);
- MMIO_D(_MMIO(0x7009c), D_ALL);
-
- MMIO_D(DSPCNTR(PIPE_A), D_ALL);
- MMIO_D(DSPADDR(PIPE_A), D_ALL);
- MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
- MMIO_D(DSPPOS(PIPE_A), D_ALL);
- MMIO_D(DSPSIZE(PIPE_A), D_ALL);
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
- MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
- MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(DSPCNTR(PIPE_B), D_ALL);
- MMIO_D(DSPADDR(PIPE_B), D_ALL);
- MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
- MMIO_D(DSPPOS(PIPE_B), D_ALL);
- MMIO_D(DSPSIZE(PIPE_B), D_ALL);
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
- MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
- MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(DSPCNTR(PIPE_C), D_ALL);
- MMIO_D(DSPADDR(PIPE_C), D_ALL);
- MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
- MMIO_D(DSPPOS(PIPE_C), D_ALL);
- MMIO_D(DSPSIZE(PIPE_C), D_ALL);
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
- MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
- MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(SPRCTL(PIPE_A), D_ALL);
- MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
- MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
- MMIO_D(SPRPOS(PIPE_A), D_ALL);
- MMIO_D(SPRSIZE(PIPE_A), D_ALL);
- MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
- MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
- MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
- MMIO_D(SPROFFSET(PIPE_A), D_ALL);
- MMIO_D(SPRSCALE(PIPE_A), D_ALL);
- MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(SPRCTL(PIPE_B), D_ALL);
- MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
- MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
- MMIO_D(SPRPOS(PIPE_B), D_ALL);
- MMIO_D(SPRSIZE(PIPE_B), D_ALL);
- MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
- MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
- MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
- MMIO_D(SPROFFSET(PIPE_B), D_ALL);
- MMIO_D(SPRSCALE(PIPE_B), D_ALL);
- MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(SPRCTL(PIPE_C), D_ALL);
- MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
- MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
- MMIO_D(SPRPOS(PIPE_C), D_ALL);
- MMIO_D(SPRSIZE(PIPE_C), D_ALL);
- MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
- MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
- MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
- MMIO_D(SPROFFSET(PIPE_C), D_ALL);
- MMIO_D(SPRSCALE(PIPE_C), D_ALL);
- MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
- MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
- MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
- MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
- MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
- MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
- MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
- MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
- MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
- MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
-
- MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
- MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
- MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
- MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
- MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
- MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
- MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
- MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
- MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
-
- MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
- MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
- MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
- MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
- MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
- MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
- MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
- MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
- MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
-
- MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
- MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
- MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
- MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
- MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
- MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
- MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
- MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
-
- MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
-
- MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
-
- MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
-
- MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
-
- MMIO_D(PF_CTL(PIPE_A), D_ALL);
- MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
- MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
- MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
- MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
-
- MMIO_D(PF_CTL(PIPE_B), D_ALL);
- MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
- MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
- MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
- MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
-
- MMIO_D(PF_CTL(PIPE_C), D_ALL);
- MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
- MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
- MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
- MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
-
- MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL);
- MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL);
- MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL);
- MMIO_D(WM1_LP_ILK, D_ALL);
- MMIO_D(WM2_LP_ILK, D_ALL);
- MMIO_D(WM3_LP_ILK, D_ALL);
- MMIO_D(WM1S_LP_ILK, D_ALL);
- MMIO_D(WM2S_LP_IVB, D_ALL);
- MMIO_D(WM3S_LP_IVB, D_ALL);
-
- MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
- MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
- MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
- MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
-
- MMIO_D(_MMIO(0x48268), D_ALL);
-
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
@@ -2551,74 +2309,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
-
- MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
-
- MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
-
- MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
-
- MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
- MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
- MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
-
- MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
- MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
- MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
-
- MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
- MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
- MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
-
- MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
- MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
- MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
-
- MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
- MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
- MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
- MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
- MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
- MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
-
MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
- MMIO_D(PCH_PP_DIVISOR, D_ALL);
- MMIO_D(PCH_PP_STATUS, D_ALL);
- MMIO_D(PCH_LVDS, D_ALL);
- MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
- MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
- MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
- MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
- MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
- MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
- MMIO_D(PCH_DREF_CONTROL, D_ALL);
- MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
- MMIO_D(PCH_DPLL_SEL, D_ALL);
-
- MMIO_D(_MMIO(0x61208), D_ALL);
- MMIO_D(_MMIO(0x6120c), D_ALL);
- MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
- MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
-
MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
@@ -2634,143 +2325,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
- MMIO_D(FUSE_STRAP, D_ALL);
- MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
-
- MMIO_D(DISP_ARB_CTL, D_ALL);
- MMIO_D(DISP_ARB_CTL2, D_ALL);
-
- MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
- MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
- MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
-
- MMIO_D(SOUTH_CHICKEN1, D_ALL);
MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
- MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
- MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
- MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
- MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
- MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
-
- MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_FBC_RT_BASE, D_ALL);
-
- MMIO_D(IPS_CTL, D_ALL);
-
- MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
-
- MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
-
- MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
-
- MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
- MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
- MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
- MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
- MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
- MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
- MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(_MMIO(0x60110), D_ALL);
- MMIO_D(_MMIO(0x61110), D_ALL);
- MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
-
- MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
- MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
- MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
- MMIO_D(SPLL_CTL, D_ALL);
- MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
- MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
- MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
- MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
- MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
-
- MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
- MMIO_D(_MMIO(0x46508), D_ALL);
-
- MMIO_D(_MMIO(0x49080), D_ALL);
- MMIO_D(_MMIO(0x49180), D_ALL);
- MMIO_D(_MMIO(0x49280), D_ALL);
-
- MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
- MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
- MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
-
- MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
- MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
- MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
-
- MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
- MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
- MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
-
MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
- MMIO_D(SBI_ADDR, D_ALL);
MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
- MMIO_D(PIXCLK_GATE, D_ALL);
MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
dp_aux_ch_ctl_mmio_write);
@@ -2793,65 +2351,18 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
- MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
- MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
- MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
-
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
- MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
- MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
- MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
- MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
-
MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
- MMIO_D(FORCEWAKE_ACK, D_ALL);
- MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
- MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
- MMIO_D(ECOBUS, D_ALL);
MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
- MMIO_D(GEN6_RPNSWREQ, D_ALL);
- MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
- MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
- MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
- MMIO_D(GEN6_RPSTAT1, D_ALL);
- MMIO_D(GEN6_RP_CONTROL, D_ALL);
- MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
- MMIO_D(GEN6_RP_CUR_UP, D_ALL);
- MMIO_D(GEN6_RP_PREV_UP, D_ALL);
- MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
- MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
- MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
- MMIO_D(GEN6_RP_UP_EI, D_ALL);
- MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
- MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
- MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
- MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
- MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
- MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
- MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
- MMIO_D(GEN6_RC_SLEEP, D_ALL);
- MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
- MMIO_D(GEN6_PMINTRMSK, D_ALL);
MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
@@ -2859,97 +2370,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_D(RSTDBYCTL, D_ALL);
-
MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
- MMIO_D(TILECTL, D_ALL);
-
- MMIO_D(GEN6_UCGCTL1, D_ALL);
- MMIO_D(GEN6_UCGCTL2, D_ALL);
-
- MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(GEN6_PCODE_DATA, D_ALL);
- MMIO_D(_MMIO(0x13812c), D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
- MMIO_D(HSW_EDRAM_CAP, D_ALL);
- MMIO_D(HSW_IDICR, D_ALL);
MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
- MMIO_D(_MMIO(0x3c), D_ALL);
- MMIO_D(_MMIO(0x860), D_ALL);
- MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL);
- MMIO_D(_MMIO(0x121d0), D_ALL);
- MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL);
- MMIO_D(_MMIO(0x41d0), D_ALL);
- MMIO_D(GAC_ECO_BITS, D_ALL);
- MMIO_D(_MMIO(0x6200), D_ALL);
- MMIO_D(_MMIO(0x6204), D_ALL);
- MMIO_D(_MMIO(0x6208), D_ALL);
- MMIO_D(_MMIO(0x7118), D_ALL);
- MMIO_D(_MMIO(0x7180), D_ALL);
- MMIO_D(_MMIO(0x7408), D_ALL);
- MMIO_D(_MMIO(0x7c00), D_ALL);
MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
- MMIO_D(_MMIO(0x911c), D_ALL);
- MMIO_D(_MMIO(0x9120), D_ALL);
MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GAB_CTL, D_ALL);
- MMIO_D(_MMIO(0x48800), D_ALL);
- MMIO_D(_MMIO(0xce044), D_ALL);
- MMIO_D(_MMIO(0xe6500), D_ALL);
- MMIO_D(_MMIO(0xe6504), D_ALL);
- MMIO_D(_MMIO(0xe6600), D_ALL);
- MMIO_D(_MMIO(0xe6604), D_ALL);
- MMIO_D(_MMIO(0xe6700), D_ALL);
- MMIO_D(_MMIO(0xe6704), D_ALL);
- MMIO_D(_MMIO(0xe6800), D_ALL);
- MMIO_D(_MMIO(0xe6804), D_ALL);
- MMIO_D(PCH_GMBUS4, D_ALL);
- MMIO_D(PCH_GMBUS5, D_ALL);
-
- MMIO_D(_MMIO(0x902c), D_ALL);
- MMIO_D(_MMIO(0xec008), D_ALL);
- MMIO_D(_MMIO(0xec00c), D_ALL);
- MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
- MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
- MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
- MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
- MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
- MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
- MMIO_D(_MMIO(0xec408), D_ALL);
- MMIO_D(_MMIO(0xec40c), D_ALL);
- MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
- MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
- MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
- MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
- MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
- MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
- MMIO_D(_MMIO(0xfc810), D_ALL);
- MMIO_D(_MMIO(0xfc81c), D_ALL);
- MMIO_D(_MMIO(0xfc828), D_ALL);
- MMIO_D(_MMIO(0xfc834), D_ALL);
- MMIO_D(_MMIO(0xfcc00), D_ALL);
- MMIO_D(_MMIO(0xfcc0c), D_ALL);
- MMIO_D(_MMIO(0xfcc18), D_ALL);
- MMIO_D(_MMIO(0xfcc24), D_ALL);
- MMIO_D(_MMIO(0xfd000), D_ALL);
- MMIO_D(_MMIO(0xfd00c), D_ALL);
- MMIO_D(_MMIO(0xfd018), D_ALL);
- MMIO_D(_MMIO(0xfd024), D_ALL);
- MMIO_D(_MMIO(0xfd034), D_ALL);
-
MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
- MMIO_D(_MMIO(0x2054), D_ALL);
- MMIO_D(_MMIO(0x12054), D_ALL);
- MMIO_D(_MMIO(0x22054), D_ALL);
- MMIO_D(_MMIO(0x1a054), D_ALL);
-
- MMIO_D(_MMIO(0x44070), D_ALL);
MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2957,8 +2388,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
- MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -3006,28 +2435,23 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
@@ -3035,7 +2459,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
@@ -3043,7 +2466,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
@@ -3051,22 +2473,18 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
@@ -3101,21 +2519,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
- MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
- MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
- MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
- MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
- MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
- MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
- MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
-
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
- MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
- MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
-
- MMIO_D(GAMTARBMODE, D_BDW_PLUS);
-
#define RING_REG(base) _MMIO((base) + 0x270)
MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
@@ -3124,24 +2529,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
-
- MMIO_D(WM_MISC, D_BDW);
- MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
-
- MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
- MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
- MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
-
- MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
-
- MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
- MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
- MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
-
- MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3153,27 +2540,14 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
D_BDW_PLUS, NULL, force_nonpriv_write);
- MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
- MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
-
MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
-
- MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
-
- MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
- MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
-
MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -3213,30 +2587,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
- MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
- MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
- MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
- MMIO_D(DC_STATE_EN, D_SKL_PLUS);
- MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
- MMIO_D(CDCLK_CTL, D_SKL_PLUS);
MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
- MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
- MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
@@ -3279,22 +2638,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-
- MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
@@ -3356,30 +2699,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
- MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
- MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
-
- MMIO_D(DMC_SSP_BASE, D_SKL_PLUS);
- MMIO_D(DMC_HTP_SKL, D_SKL_PLUS);
- MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS);
-
MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(SKL_DFSM, D_SKL_PLUS);
- MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
-
MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
- MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
- MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
- MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3396,40 +2722,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
NULL, gen9_trtt_chicken_write);
- MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
-
- MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
-
- MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
- MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
- MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
- MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
- MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
- MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
- MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
- MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
-
- MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
- MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
- MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
-
- MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
-
- MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, csfe_chicken1_mmio_write);
@@ -3440,7 +2735,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
@@ -3448,43 +2742,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
- MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
-
- MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
- MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
- MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
- MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
- MMIO_D(ERROR_GEN6, D_BXT);
- MMIO_D(DONE_REG, D_BXT);
- MMIO_D(EIR, D_BXT);
- MMIO_D(PGTBL_ER, D_BXT);
- MMIO_D(_MMIO(0x4194), D_BXT);
- MMIO_D(_MMIO(0x4294), D_BXT);
- MMIO_D(_MMIO(0x4494), D_BXT);
-
- MMIO_RING_D(RING_PSMI_CTL, D_BXT);
- MMIO_RING_D(RING_DMA_FADD, D_BXT);
- MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
- MMIO_RING_D(RING_IPEHR, D_BXT);
- MMIO_RING_D(RING_INSTPS, D_BXT);
- MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
- MMIO_RING_D(RING_BBSTATE, D_BXT);
- MMIO_RING_D(RING_IPEIR, D_BXT);
-
- MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
-
MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
- MMIO_D(BXT_RP_STATE_CAP, D_BXT);
MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
NULL, bxt_phy_ctl_family_write);
MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
NULL, bxt_phy_ctl_family_write);
- MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
- MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
- MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
NULL, bxt_port_pll_enable_write);
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
@@ -3492,128 +2756,19 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
bxt_port_pll_enable_write);
- MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
-
- MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
-
- MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
bxt_port_tx_dw3_read, NULL);
- MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
-
- MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
bxt_port_tx_dw3_read, NULL);
- MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
-
- MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
bxt_port_tx_dw3_read, NULL);
- MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
-
- MMIO_D(BXT_DE_PLL_CTL, D_BXT);
MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
- MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
- MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
-
- MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
- MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
-
- MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
- MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
- MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
-
- MMIO_D(RC6_CTX_BASE, D_BXT);
-
- MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
- MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
- MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
- MMIO_D(GEN6_GFXPAUSE, D_BXT);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
@@ -3633,17 +2788,14 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
return 0;
}
-static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
- unsigned int offset)
+static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
+ unsigned int offset)
{
- unsigned long device = intel_gvt_get_device_type(gvt);
- const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
int num = gvt->mmio.num_mmio_block;
int i;
for (i = 0; i < num; i++, block++) {
- if (!(device & block->device))
- continue;
if (offset >= i915_mmio_reg_offset(block->offset) &&
offset < i915_mmio_reg_offset(block->offset) + block->size)
return block;
@@ -3668,23 +2820,117 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
kfree(e);
+ kfree(gvt->mmio.mmio_block);
+ gvt->mmio.mmio_block = NULL;
+ gvt->mmio.num_mmio_block = 0;
+
vfree(gvt->mmio.mmio_attribute);
gvt->mmio.mmio_attribute = NULL;
}
-/* Special MMIO blocks. registers in MMIO block ranges should not be command
- * accessible (should have no F_CMD_ACCESS flag).
- * otherwise, need to update cmd_reg_handler in cmd_parser.c
- */
-static const struct gvt_mmio_block mmio_blocks[] = {
- {D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
- {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
- pvinfo_mmio_read, pvinfo_mmio_write},
- {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
+static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ u32 size)
+{
+ struct intel_gvt *gvt = iter->data;
+ struct intel_gvt_mmio_info *info, *p;
+ u32 start, end, i;
+
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+
+ start = offset;
+ end = offset + size;
+
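+ /* Register a default read/write handler for every dword in the range. */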
+ for (i = start; i < end; i += 4) {
+ p = intel_gvt_find_mmio_info(gvt, i);
+ if (p) {
+ WARN(1, "dup mmio definition offset %x\n",
+ p->offset);
+
+ /* Return -EEXIST here to make GVT-g load fail, so that
+ * duplicate MMIO definitions are caught as early as
+ * possible.
+ */
+ return -EEXIST;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->offset = i;
+ info->read = intel_vgpu_default_mmio_read;
+ info->write = intel_vgpu_default_mmio_write;
+ INIT_HLIST_NODE(&info->node);
+ hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+ gvt->mmio.num_tracked_mmio++;
+ }
+ return 0;
+}
+
+static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
+ u32 offset, u32 size)
+{
+ struct intel_gvt *gvt = iter->data;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ void *ret;
+
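+ /* Grow the block array by one entry; krealloc preserves the old data. */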
+ ret = krealloc(block,
+ (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
+ GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ gvt->mmio.mmio_block = block = ret;
+
+ block += gvt->mmio.num_mmio_block;
+
+ memset(block, 0, sizeof(*block));
+
+ block->offset = _MMIO(offset);
+ block->size = size;
+
+ gvt->mmio.num_mmio_block++;
+
+ return 0;
+}
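handle_mmio_block() grows the block array one element at a time; note that krealloc() may move the allocation, so the element pointer is recomputed from the returned base before being written. The same idiom, condensed into a hypothetical stand-alone helper:

/* Grow-by-one with krealloc(): on failure the old array is untouched
 * and still owned by the caller; on success it may have moved, so the
 * tail element must be addressed via the returned base.
 */
static int grow_mmio_blocks(struct gvt_mmio_block **blocks, int *num)
{
	struct gvt_mmio_block *tmp;

	tmp = krealloc(*blocks, (*num + 1) * sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	memset(&tmp[*num], 0, sizeof(*tmp));
	*blocks = tmp;
	(*num)++;
	return 0;
}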
+
+static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ u32 size)
+{
+ if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
+ return handle_mmio(iter, offset, size);
+ else
+ return handle_mmio_block(iter, offset, size);
+}
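The callback thus routes each range by size: anything under 1 KiB, plus the MOCS range (which needs per-register granularity despite its size), is expanded into per-dword entries by handle_mmio(), while larger ranges such as the DMC firmware window, the MCHBAR mirror, the PVINFO page and the legacy palettes each become a single gvt_mmio_block, reproducing the former static mmio_blocks[] table.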
+
+static int init_mmio_info(struct intel_gvt *gvt)
+{
+ struct intel_gvt_mmio_table_iter iter = {
+ .i915 = gvt->gt->i915,
+ .data = gvt,
+ .handle_mmio_cb = handle_mmio_cb,
+ };
+
+ return intel_gvt_iterate_mmio_table(&iter);
+}
+
+static int init_mmio_block_handlers(struct intel_gvt *gvt)
+{
+ struct gvt_mmio_block *block;
+
+ block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
+ if (!block) {
+ WARN(1, "fail to assign handlers to mmio block %x\n",
+ i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
+ return -ENODEV;
+ }
+
+ block->read = pvinfo_mmio_read;
+ block->write = pvinfo_mmio_write;
+
+ return 0;
+}
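Only the PVINFO page needs non-default handlers, so init_mmio_block_handlers() looks it up among the freshly generated blocks and attaches the pvinfo_mmio_read/pvinfo_mmio_write pair that the deleted static table used to carry.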
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
@@ -3707,6 +2953,14 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (!gvt->mmio.mmio_attribute)
return -ENOMEM;
+ ret = init_mmio_info(gvt);
+ if (ret)
+ goto err;
+
+ ret = init_mmio_block_handlers(gvt);
+ if (ret)
+ goto err;
+
ret = init_generic_mmio_info(gvt);
if (ret)
goto err;
@@ -3737,9 +2991,6 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
}
- gvt->mmio.mmio_block = mmio_blocks;
- gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
-
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
@@ -3759,7 +3010,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data)
{
- const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
struct intel_gvt_mmio_info *e;
int i, j, ret;
@@ -3775,9 +3026,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
continue;
for (j = 0; j < block->size; j += 4) {
- ret = handler(gvt,
- i915_mmio_reg_offset(block->offset) + j,
- data);
+ ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data);
if (ret)
return ret;
}
@@ -3877,7 +3126,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info;
- const struct gvt_mmio_block *mmio_block;
+ struct gvt_mmio_block *mmio_block;
gvt_mmio_func func;
int ret;
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
deleted file mode 100644
index f33e3cbd0439..000000000000
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Eddie Dong <eddie.dong@intel.com>
- * Dexuan Cui
- * Jike Song <jike.song@intel.com>
- *
- * Contributors:
- * Zhi Wang <zhi.a.wang@intel.com>
- *
- */
-
-#ifndef _GVT_HYPERCALL_H_
-#define _GVT_HYPERCALL_H_
-
-#include <linux/types.h>
-
-struct device;
-
-enum hypervisor_type {
- INTEL_GVT_HYPERVISOR_XEN = 0,
- INTEL_GVT_HYPERVISOR_KVM,
-};
-
-/*
- * Specific GVT-g MPT modules function collections. Currently GVT-g supports
- * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
- */
-struct intel_gvt_mpt {
- enum hypervisor_type type;
- int (*host_init)(struct device *dev, void *gvt, const void *ops);
- void (*host_exit)(struct device *dev, void *gvt);
- int (*attach_vgpu)(void *vgpu, unsigned long *handle);
- void (*detach_vgpu)(void *vgpu);
- int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
- unsigned long (*from_virt_to_mfn)(void *p);
- int (*enable_page_track)(unsigned long handle, u64 gfn);
- int (*disable_page_track)(unsigned long handle, u64 gfn);
- int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
- unsigned long len);
- int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
- unsigned long len);
- unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
-
- int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
- unsigned long size, dma_addr_t *dma_addr);
- void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
-
- int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
-
- int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
- unsigned long mfn, unsigned int nr, bool map);
- int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
- bool map);
- int (*set_opregion)(void *vgpu);
- int (*set_edid)(void *vgpu, int port_num);
- int (*get_vfio_device)(void *vgpu);
- void (*put_vfio_device)(void *vgpu);
- bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
-};
-
-#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 228f623d466d..a6b2021b665f 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -29,6 +29,8 @@
*
*/
+#include <linux/eventfd.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
@@ -397,9 +399,45 @@ static void init_irq_map(struct intel_gvt_irq *irq)
}
/* =======================vEvent injection===================== */
+
+#define MSI_CAP_CONTROL(offset) ((offset) + 2)
+#define MSI_CAP_ADDRESS(offset) ((offset) + 4)
+#define MSI_CAP_DATA(offset) ((offset) + 8)
+#define MSI_CAP_EN 0x1
+
static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
- return intel_gvt_hypervisor_inject_msi(vgpu);
+ unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+ u16 control, data;
+ u32 addr;
+
+ control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
+ addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
+ data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
+
+ /* Do not generate MSI if MSIEN is disabled */
+ if (!(control & MSI_CAP_EN))
+ return 0;
+
+ if (WARN(control & GENMASK(15, 1), "only one MSI format is supported\n"))
+ return -EINVAL;
+
+ trace_inject_msi(vgpu->id, addr, data);
+
+ /*
+ * When the guest is powered off, msi_trigger is set to NULL, but the
+ * vGPU's config and MMIO registers are not restored to defaults during
+ * guest poweroff. If this vGPU is reused by the next VM, its pipes may
+ * still be enabled, so once the vGPU becomes active it can receive
+ * vblank interrupt injection requests. msi_trigger stays NULL until the
+ * guest enables MSI, so a NULL msi_trigger still returns success and
+ * simply skips injecting the interrupt into the guest.
+ */
+ if (!vgpu->attached)
+ return -ESRCH;
+ if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
+ return -EFAULT;
+ return 0;
}
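The capability offsets above follow the 32-bit single-message PCI MSI layout; any other format trips the GENMASK(15, 1) check. A hedged sketch of a decoder for the same fields (the helper name is hypothetical; vgpu_cfg_space() is the accessor used above):

/* Sketch: decode the vGPU's MSI capability as inject_virtual_interrupt()
 * does. Control sits at +2, address at +4, data at +8 from the
 * capability base.
 */
static bool vgpu_msi_decode(struct intel_vgpu *vgpu, u32 *addr, u16 *data)
{
	unsigned long cap = vgpu->gvt->device_info.msi_cap_offset;
	void *cfg = vgpu_cfg_space(vgpu);
	u16 control = *(u16 *)(cfg + MSI_CAP_CONTROL(cap));

	if (!(control & MSI_CAP_EN))
		return false;	/* guest has not enabled MSI yet */

	*addr = *(u32 *)(cfg + MSI_CAP_ADDRESS(cap));
	*data = *(u16 *)(cfg + MSI_CAP_DATA(cap));
	return true;
}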
static void propagate_event(struct intel_gvt_irq *irq,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 057ec4490104..0787ba5c301f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1,7 +1,7 @@
/*
* KVMGT - the implementation of Intel mediated pass-through framework for KVM
*
- * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,11 @@
* Kevin Tian <kevin.tian@intel.com>
* Jike Song <jike.song@intel.com>
* Xiaoguang Chen <xiaoguang.chen@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
*/
#include <linux/init.h>
@@ -39,8 +44,6 @@
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
-#include <linux/kvm_host.h>
-#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>
@@ -49,9 +52,11 @@
#include <drm/drm_edid.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "gvt.h"
-static const struct intel_gvt_ops *intel_gvt_ops;
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS(I915_GVT);
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
@@ -90,16 +95,6 @@ struct kvmgt_pgfn {
struct hlist_node hnode;
};
-#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
-struct kvmgt_guest_info {
- struct kvm *kvm;
- struct intel_vgpu *vgpu;
- struct kvm_page_track_notifier_node track_node;
-#define NR_BKT (1 << 18)
- struct hlist_head ptable[NR_BKT];
-#undef NR_BKT
-};
-
struct gvt_dma {
struct intel_vgpu *vgpu;
struct rb_node gfn_node;
@@ -110,41 +105,15 @@ struct gvt_dma {
struct kref ref;
};
-struct kvmgt_vdev {
- struct intel_vgpu *vgpu;
- struct mdev_device *mdev;
- struct vfio_region *region;
- int num_regions;
- struct eventfd_ctx *intx_trigger;
- struct eventfd_ctx *msi_trigger;
+#define vfio_dev_to_vgpu(vfio_dev) \
+ container_of((vfio_dev), struct intel_vgpu, vfio_device)
- /*
- * Two caches are used to avoid mapping duplicated pages (eg.
- * scratch pages). This help to reduce dma setup overhead.
- */
- struct rb_root gfn_cache;
- struct rb_root dma_addr_cache;
- unsigned long nr_cache_entries;
- struct mutex cache_lock;
-
- struct notifier_block iommu_notifier;
- struct notifier_block group_notifier;
- struct kvm *kvm;
- struct work_struct release_work;
- atomic_t released;
- struct vfio_device *vfio_device;
- struct vfio_group *vfio_group;
-};
-
-static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
-{
- return intel_vgpu_vdev(vgpu);
-}
-
-static inline bool handle_valid(unsigned long handle)
-{
- return !!(handle & ~0xff);
-}
+static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node);
+static void kvmgt_page_track_flush_slot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node);
static ssize_t available_instances_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr,
@@ -259,15 +228,12 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
}
}
-static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
-static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
int total_pages;
int npage;
int ret;
@@ -277,7 +243,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
+ ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1);
drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -286,7 +252,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long base_pfn = 0;
int total_pages;
int npage;
@@ -301,7 +266,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+ ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1,
IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -368,7 +333,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
- struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
+ struct rb_node *node = vgpu->dma_addr_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -386,7 +351,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
+ struct rb_node *node = vgpu->gfn_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -407,7 +372,6 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
@@ -420,7 +384,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
- link = &vdev->gfn_cache.rb_node;
+ link = &vgpu->gfn_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -431,11 +395,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->gfn_node, parent, link);
- rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
+ rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);
/* dma_addr_cache maps dma addr to struct gvt_dma. */
parent = NULL;
- link = &vdev->dma_addr_cache.rb_node;
+ link = &vgpu->dma_addr_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -446,59 +410,54 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->dma_addr_node, parent, link);
- rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
+ rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache);
- vdev->nr_cache_entries++;
+ vgpu->nr_cache_entries++;
return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-
- rb_erase(&entry->gfn_node, &vdev->gfn_cache);
- rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
+ rb_erase(&entry->gfn_node, &vgpu->gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache);
kfree(entry);
- vdev->nr_cache_entries--;
+ vgpu->nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
for (;;) {
- mutex_lock(&vdev->cache_lock);
- node = rb_first(&vdev->gfn_cache);
+ mutex_lock(&vgpu->cache_lock);
+ node = rb_first(&vgpu->gfn_cache);
if (!node) {
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
}
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-
- vdev->gfn_cache = RB_ROOT;
- vdev->dma_addr_cache = RB_ROOT;
- vdev->nr_cache_entries = 0;
- mutex_init(&vdev->cache_lock);
+ vgpu->gfn_cache = RB_ROOT;
+ vgpu->dma_addr_cache = RB_ROOT;
+ vgpu->nr_cache_entries = 0;
+ mutex_init(&vgpu->cache_lock);
}
-static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
+static void kvmgt_protect_table_init(struct intel_vgpu *info)
{
hash_init(info->ptable);
}
-static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
+static void kvmgt_protect_table_destroy(struct intel_vgpu *info)
{
struct kvmgt_pgfn *p;
struct hlist_node *tmp;
@@ -511,7 +470,7 @@ static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
}
static struct kvmgt_pgfn *
-__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
+__kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p, *res = NULL;
@@ -525,8 +484,7 @@ __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
return res;
}
-static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
- gfn_t gfn)
+static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
@@ -534,7 +492,7 @@ static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
return !!p;
}
-static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
+static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
@@ -549,8 +507,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
hash_add(info->ptable, &p->hnode, gfn);
}
-static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
- gfn_t gfn)
+static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
@@ -564,18 +521,17 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
- void *base = vdev->region[i].data;
+ void *base = vgpu->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- if (pos >= vdev->region[i].size || iswrite) {
+ if (pos >= vgpu->region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
- count = min(count, (size_t)(vdev->region[i].size - pos));
+ count = min(count, (size_t)(vgpu->region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
@@ -617,9 +573,9 @@ static int handle_edid_regs(struct intel_vgpu *vgpu,
gvt_vgpu_err("invalid EDID blob\n");
return -EINVAL;
}
- intel_gvt_ops->emulate_hotplug(vgpu, true);
+ intel_vgpu_emulate_hotplug(vgpu, true);
} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
- intel_gvt_ops->emulate_hotplug(vgpu, false);
+ intel_vgpu_emulate_hotplug(vgpu, false);
else {
gvt_vgpu_err("invalid EDID link state %d\n",
regs->link_state);
@@ -668,8 +624,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
int ret;
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
- struct vfio_edid_region *region =
- (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
+ struct vfio_edid_region *region = vgpu->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos < region->vfio_edid_regs.edid_offset) {
@@ -701,44 +656,27 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct vfio_region *region;
- region = krealloc(vdev->region,
- (vdev->num_regions + 1) * sizeof(*region),
+ region = krealloc(vgpu->region,
+ (vgpu->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
- vdev->region = region;
- vdev->region[vdev->num_regions].type = type;
- vdev->region[vdev->num_regions].subtype = subtype;
- vdev->region[vdev->num_regions].ops = ops;
- vdev->region[vdev->num_regions].size = size;
- vdev->region[vdev->num_regions].flags = flags;
- vdev->region[vdev->num_regions].data = data;
- vdev->num_regions++;
+ vgpu->region = region;
+ vgpu->region[vgpu->num_regions].type = type;
+ vgpu->region[vgpu->num_regions].subtype = subtype;
+ vgpu->region[vgpu->num_regions].ops = ops;
+ vgpu->region[vgpu->num_regions].size = size;
+ vgpu->region[vgpu->num_regions].flags = flags;
+ vgpu->region[vgpu->num_regions].data = data;
+ vgpu->num_regions++;
return 0;
}
-static int kvmgt_get_vfio_device(void *p_vgpu)
+int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-
- vdev->vfio_device = vfio_device_get_from_dev(
- mdev_dev(vdev->mdev));
- if (!vdev->vfio_device) {
- gvt_vgpu_err("failed to get vfio device\n");
- return -ENODEV;
- }
- return 0;
-}
-
-
-static int kvmgt_set_opregion(void *p_vgpu)
-{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
void *base;
int ret;
@@ -764,9 +702,8 @@ static int kvmgt_set_opregion(void *p_vgpu)
return ret;
}
-static int kvmgt_set_edid(void *p_vgpu, int port_num)
+int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
struct vfio_edid_region *base;
int ret;
@@ -794,71 +731,11 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
return ret;
}
-static void kvmgt_put_vfio_device(void *vgpu)
-{
- struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
-
- if (WARN_ON(!vdev->vfio_device))
- return;
-
- vfio_device_put(vdev->vfio_device);
-}
-
-static int intel_vgpu_create(struct mdev_device *mdev)
-{
- struct intel_vgpu *vgpu = NULL;
- struct intel_vgpu_type *type;
- struct device *pdev;
- struct intel_gvt *gvt;
- int ret;
-
- pdev = mdev_parent_dev(mdev);
- gvt = kdev_to_i915(pdev)->gvt;
-
- type = &gvt->types[mdev_get_type_group_id(mdev)];
- if (!type) {
- ret = -EINVAL;
- goto out;
- }
-
- vgpu = intel_gvt_ops->vgpu_create(gvt, type);
- if (IS_ERR_OR_NULL(vgpu)) {
- ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
- gvt_err("failed to create intel vgpu: %d\n", ret);
- goto out;
- }
-
- INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
-
- kvmgt_vdev(vgpu)->mdev = mdev;
- mdev_set_drvdata(mdev, vgpu);
-
- gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
- dev_name(mdev_dev(mdev)));
- ret = 0;
-
-out:
- return ret;
-}
-
-static int intel_vgpu_remove(struct mdev_device *mdev)
-{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
-
- if (handle_valid(vgpu->handle))
- return -EBUSY;
-
- intel_gvt_ops->vgpu_destroy(vgpu);
- return 0;
-}
-
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct kvmgt_vdev *vdev = container_of(nb,
- struct kvmgt_vdev,
- iommu_notifier);
- struct intel_vgpu *vgpu = vdev->vgpu;
+ struct intel_vgpu *vgpu =
+ container_of(nb, struct intel_vgpu, iommu_notifier);
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -868,7 +745,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
iov_pfn = unmap->iova >> PAGE_SHIFT;
end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
- mutex_lock(&vdev->cache_lock);
+ mutex_lock(&vgpu->cache_lock);
for (; iov_pfn < end_iov_pfn; iov_pfn++) {
entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
if (!entry)
@@ -878,7 +755,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
}
return NOTIFY_OK;
@@ -887,35 +764,54 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
static int intel_vgpu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct kvmgt_vdev *vdev = container_of(nb,
- struct kvmgt_vdev,
- group_notifier);
+ struct intel_vgpu *vgpu =
+ container_of(nb, struct intel_vgpu, group_notifier);
/* the only action we care about */
if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vdev->kvm = data;
+ vgpu->kvm = data;
if (!data)
- schedule_work(&vdev->release_work);
+ schedule_work(&vgpu->release_work);
}
return NOTIFY_OK;
}
-static int intel_vgpu_open_device(struct mdev_device *mdev)
+static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct intel_vgpu *itr;
+ int id;
+ bool ret = false;
+
+ mutex_lock(&vgpu->gvt->lock);
+ for_each_active_vgpu(vgpu->gvt, itr, id) {
+ if (!itr->attached)
+ continue;
+
+ if (vgpu->kvm == itr->kvm) {
+ ret = true;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&vgpu->gvt->lock);
+ return ret;
+}
+
+static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
+{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned long events;
int ret;
struct vfio_group *vfio_group;
- vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
+ vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
- &vdev->iommu_notifier);
+ ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events,
+ &vgpu->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
@@ -923,117 +819,129 @@ static int intel_vgpu_open_device(struct mdev_device *mdev)
}
events = VFIO_GROUP_NOTIFY_SET_KVM;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
- &vdev->group_notifier);
+ ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events,
+ &vgpu->group_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
goto undo_iommu;
}
- vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+ vfio_group =
+ vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev);
if (IS_ERR_OR_NULL(vfio_group)) {
ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
goto undo_register;
}
- vdev->vfio_group = vfio_group;
+ vgpu->vfio_group = vfio_group;
- /* Take a module reference as mdev core doesn't take
- * a reference for vendor driver.
- */
- if (!try_module_get(THIS_MODULE)) {
- ret = -ENODEV;
+ ret = -EEXIST;
+ if (vgpu->attached)
+ goto undo_group;
+
+ ret = -ESRCH;
+ if (!vgpu->kvm || vgpu->kvm->mm != current->mm) {
+ gvt_vgpu_err("KVM is required to use Intel vGPU\n");
goto undo_group;
}
- ret = kvmgt_guest_init(mdev);
- if (ret)
+ ret = -EEXIST;
+ if (__kvmgt_vgpu_exist(vgpu))
goto undo_group;
- intel_gvt_ops->vgpu_activate(vgpu);
+ vgpu->attached = true;
+ kvm_get_kvm(vgpu->kvm);
- atomic_set(&vdev->released, 0);
- return ret;
+ kvmgt_protect_table_init(vgpu);
+ gvt_cache_init(vgpu);
+
+ vgpu->track_node.track_write = kvmgt_page_track_write;
+ vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+ kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node);
+
+ debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
+ &vgpu->nr_cache_entries);
+
+ intel_gvt_activate_vgpu(vgpu);
+
+ atomic_set(&vgpu->released, 0);
+ return 0;
undo_group:
- vfio_group_put_external_user(vdev->vfio_group);
- vdev->vfio_group = NULL;
+ vfio_group_put_external_user(vgpu->vfio_group);
+ vgpu->vfio_group = NULL;
undo_register:
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &vdev->group_notifier);
+ vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY,
+ &vgpu->group_notifier);
undo_iommu:
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &vdev->iommu_notifier);
+ vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->iommu_notifier);
out:
return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct eventfd_ctx *trigger;
- trigger = vdev->msi_trigger;
+ trigger = vgpu->msi_trigger;
if (trigger) {
eventfd_ctx_put(trigger);
- vdev->msi_trigger = NULL;
+ vgpu->msi_trigger = NULL;
}
}
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- struct kvmgt_guest_info *info;
int ret;
- if (!handle_valid(vgpu->handle))
+ if (!vgpu->attached)
return;
- if (atomic_cmpxchg(&vdev->released, 0, 1))
+ if (atomic_cmpxchg(&vgpu->released, 0, 1))
return;
- intel_gvt_ops->vgpu_release(vgpu);
+ intel_gvt_release_vgpu(vgpu);
- ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
- &vdev->iommu_notifier);
+ ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->iommu_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
- &vdev->group_notifier);
+ ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY,
+ &vgpu->group_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for group failed: %d\n", ret);
- /* dereference module reference taken at open */
- module_put(THIS_MODULE);
+ debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
- info = (struct kvmgt_guest_info *)vgpu->handle;
- kvmgt_guest_exit(info);
+ kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node);
+ kvm_put_kvm(vgpu->kvm);
+ kvmgt_protect_table_destroy(vgpu);
+ gvt_cache_destroy(vgpu);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
- vfio_group_put_external_user(vdev->vfio_group);
+ vfio_group_put_external_user(vgpu->vfio_group);
- vdev->kvm = NULL;
- vgpu->handle = 0;
+ vgpu->kvm = NULL;
+ vgpu->attached = false;
}
-static void intel_vgpu_close_device(struct mdev_device *mdev)
+static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
-
- __intel_vgpu_release(vgpu);
+ __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev));
}
static void intel_vgpu_release_work(struct work_struct *work)
{
- struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
- release_work);
+ struct intel_vgpu *vgpu =
+ container_of(work, struct intel_vgpu, release_work);
- __intel_vgpu_release(vdev->vgpu);
+ __intel_vgpu_release(vgpu);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1070,10 +978,10 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
int ret;
if (is_write)
- ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ ret = intel_vgpu_emulate_mmio_write(vgpu,
bar_start + off, buf, count);
else
- ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ ret = intel_vgpu_emulate_mmio_read(vgpu,
bar_start + off, buf, count);
return ret;
}
@@ -1111,17 +1019,15 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return 0;
}
-static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
+static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -1129,10 +1035,10 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
if (is_write)
- ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
+ ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
buf, count);
else
- ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
+ ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
buf, count);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
@@ -1150,20 +1056,19 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_ROM_REGION_INDEX:
break;
default:
- if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
return -EINVAL;
index -= VFIO_PCI_NUM_REGIONS;
- return vdev->region[index].ops->rw(vgpu, buf, count,
+ return vgpu->region[index].ops->rw(vgpu, buf, count,
ppos, is_write);
}
return ret == 0 ? count : ret;
}
-static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct intel_gvt *gvt = vgpu->gvt;
int offset;
@@ -1180,9 +1085,10 @@ static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
true : false;
}
-static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
+static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int done = 0;
int ret;
@@ -1191,10 +1097,10 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
/* Only support GGTT entry 8 bytes read */
if (count >= 8 && !(*ppos % 8) &&
- gtt_entry(mdev, ppos)) {
+ gtt_entry(vgpu, ppos)) {
u64 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1206,7 +1112,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 4 && !(*ppos % 4)) {
u32 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1218,7 +1124,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1230,7 +1136,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else {
u8 val;
- ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
+ ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos,
false);
if (ret <= 0)
goto read_err;
@@ -1253,10 +1159,11 @@ read_err:
return -EFAULT;
}
-static ssize_t intel_vgpu_write(struct mdev_device *mdev,
+static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev,
const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int done = 0;
int ret;
@@ -1265,13 +1172,13 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
/* Only support GGTT entry 8 bytes write */
if (count >= 8 && !(*ppos % 8) &&
- gtt_entry(mdev, ppos)) {
+ gtt_entry(vgpu, ppos)) {
u64 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1283,7 +1190,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1295,7 +1202,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val,
+ ret = intel_vgpu_rw(vgpu, (char *)&val,
sizeof(val), ppos, true);
if (ret <= 0)
goto write_err;
@@ -1307,7 +1214,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, &val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, &val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1326,13 +1233,14 @@ write_err:
return -EFAULT;
}
-static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+static int intel_vgpu_mmap(struct vfio_device *vfio_dev,
+ struct vm_area_struct *vma)
{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int index;
u64 virtaddr;
unsigned long req_size, pgoff, req_start;
pgprot_t pg_prot;
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
if (index >= VFIO_PCI_ROM_REGION_INDEX)
@@ -1407,7 +1315,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
- kvmgt_vdev(vgpu)->msi_trigger = trigger;
+ vgpu->msi_trigger = trigger;
} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
@@ -1455,11 +1363,10 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
return func(vgpu, index, start, count, flags, data);
}
-static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
unsigned long arg)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned long minsz;
gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1478,7 +1385,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS +
- vdev->num_regions;
+ vgpu->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1569,22 +1476,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
.header.version = 1 };
if (info.index >= VFIO_PCI_NUM_REGIONS +
- vdev->num_regions)
+ vgpu->num_regions)
return -EINVAL;
info.index =
array_index_nospec(info.index,
VFIO_PCI_NUM_REGIONS +
- vdev->num_regions);
+ vgpu->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset =
VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vdev->region[i].size;
- info.flags = vdev->region[i].flags;
+ info.size = vgpu->region[i].size;
+ info.flags = vgpu->region[i].flags;
- cap_type.type = vdev->region[i].type;
- cap_type.subtype = vdev->region[i].subtype;
+ cap_type.type = vgpu->region[i].type;
+ cap_type.subtype = vgpu->region[i].subtype;
ret = vfio_info_add_capability(&caps,
&cap_type.header,
@@ -1700,7 +1607,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
- intel_gvt_ops->vgpu_reset(vgpu);
+ intel_gvt_reset_vgpu(vgpu);
return 0;
} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
struct vfio_device_gfx_plane_info dmabuf;
@@ -1713,7 +1620,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (dmabuf.argsz < minsz)
return -EINVAL;
- ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
+ ret = intel_vgpu_query_plane(vgpu, &dmabuf);
if (ret != 0)
return ret;
@@ -1721,14 +1628,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
__u32 dmabuf_id;
- __s32 dmabuf_fd;
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
-
- dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
- return dmabuf_fd;
-
+ return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
}
return -ENOTTY;
@@ -1738,14 +1641,9 @@ static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
+ struct intel_vgpu *vgpu = dev_get_drvdata(dev);
- if (mdev) {
- struct intel_vgpu *vgpu = (struct intel_vgpu *)
- mdev_get_drvdata(mdev);
- return sprintf(buf, "%d\n", vgpu->id);
- }
- return sprintf(buf, "\n");
+ return sprintf(buf, "%d\n", vgpu->id);
}
static DEVICE_ATTR_RO(vgpu_id);
@@ -1765,57 +1663,78 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
-static struct mdev_parent_ops intel_vgpu_ops = {
- .mdev_attr_groups = intel_vgpu_groups,
- .create = intel_vgpu_create,
- .remove = intel_vgpu_remove,
-
- .open_device = intel_vgpu_open_device,
- .close_device = intel_vgpu_close_device,
-
- .read = intel_vgpu_read,
- .write = intel_vgpu_write,
- .mmap = intel_vgpu_mmap,
- .ioctl = intel_vgpu_ioctl,
+static const struct vfio_device_ops intel_vgpu_dev_ops = {
+ .open_device = intel_vgpu_open_device,
+ .close_device = intel_vgpu_close_device,
+ .read = intel_vgpu_read,
+ .write = intel_vgpu_write,
+ .mmap = intel_vgpu_mmap,
+ .ioctl = intel_vgpu_ioctl,
};
-static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
+static int intel_vgpu_probe(struct mdev_device *mdev)
{
+ struct device *pdev = mdev_parent_dev(mdev);
+ struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt;
+ struct intel_vgpu_type *type;
+ struct intel_vgpu *vgpu;
int ret;
- ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
- if (ret)
- return ret;
+ type = &gvt->types[mdev_get_type_group_id(mdev)];
+ if (!type)
+ return -EINVAL;
- intel_gvt_ops = ops;
- intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
+ vgpu = intel_gvt_create_vgpu(gvt, type);
+ if (IS_ERR(vgpu)) {
+ gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
+ return PTR_ERR(vgpu);
+ }
- ret = mdev_register_device(dev, &intel_vgpu_ops);
- if (ret)
- intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
+ INIT_WORK(&vgpu->release_work, intel_vgpu_release_work);
+ vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
+ &intel_vgpu_dev_ops);
- return ret;
+ dev_set_drvdata(&mdev->dev, vgpu);
+ ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
+ if (ret) {
+ intel_gvt_destroy_vgpu(vgpu);
+ return ret;
+ }
+
+ gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
+ dev_name(mdev_dev(mdev)));
+ return 0;
}
-static void kvmgt_host_exit(struct device *dev, void *gvt)
+static void intel_vgpu_remove(struct mdev_device *mdev)
{
- mdev_unregister_device(dev);
- intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
-}
+ struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
-static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
+ if (WARN_ON_ONCE(vgpu->attached))
+ return;
+ intel_gvt_destroy_vgpu(vgpu);
+}
+
+static struct mdev_driver intel_vgpu_mdev_driver = {
+ .driver = {
+ .name = "intel_vgpu_mdev",
+ .owner = THIS_MODULE,
+ .dev_groups = intel_vgpu_groups,
+ },
+ .probe = intel_vgpu_probe,
+ .remove = intel_vgpu_remove,
+ .supported_type_groups = gvt_vgpu_type_groups,
+};
+
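The old monolithic mdev_parent_ops is split in two: struct vfio_device_ops carries the per-device I/O entry points, while struct mdev_driver owns the bind/unbind lifecycle. An illustrative summary of how the pieces defined above connect:

/*
 * mdev core binds the device -> intel_vgpu_probe()
 *     intel_gvt_create_vgpu()
 *     vfio_init_group_dev(&vgpu->vfio_device, ..., &intel_vgpu_dev_ops)
 *     vfio_register_emulated_iommu_dev()
 * userspace opens the device -> intel_vgpu_open_device() via the ops table
 * mdev core unbinds          -> intel_vgpu_remove() (WARNs if still attached)
 */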
+int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
- struct kvmgt_guest_info *info;
- struct kvm *kvm;
+ struct kvm *kvm = info->kvm;
struct kvm_memory_slot *slot;
int idx;
- if (!handle_valid(handle))
+ if (!info->attached)
return -ESRCH;
- info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
-
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
@@ -1837,19 +1756,15 @@ out:
return 0;
}
-static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
+int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
- struct kvmgt_guest_info *info;
- struct kvm *kvm;
+ struct kvm *kvm = info->kvm;
struct kvm_memory_slot *slot;
int idx;
- if (!handle_valid(handle))
+ if (!info->attached)
return 0;
- info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
-
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
@@ -1875,11 +1790,11 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *val, int len,
struct kvm_page_track_notifier_node *node)
{
- struct kvmgt_guest_info *info = container_of(node,
- struct kvmgt_guest_info, track_node);
+ struct intel_vgpu *info =
+ container_of(node, struct intel_vgpu, track_node);
if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
- intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+ intel_vgpu_page_track_handler(info, gpa,
(void *)val, len);
}
@@ -1889,8 +1804,8 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
{
int i;
gfn_t gfn;
- struct kvmgt_guest_info *info = container_of(node,
- struct kvmgt_guest_info, track_node);
+ struct intel_vgpu *info =
+ container_of(node, struct intel_vgpu, track_node);
write_lock(&kvm->mmu_lock);
for (i = 0; i < slot->npages; i++) {
@@ -1904,182 +1819,32 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
write_unlock(&kvm->mmu_lock);
}
-static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
-{
- struct intel_vgpu *itr;
- struct kvmgt_guest_info *info;
- int id;
- bool ret = false;
-
- mutex_lock(&vgpu->gvt->lock);
- for_each_active_vgpu(vgpu->gvt, itr, id) {
- if (!handle_valid(itr->handle))
- continue;
-
- info = (struct kvmgt_guest_info *)itr->handle;
- if (kvm && kvm == info->kvm) {
- ret = true;
- goto out;
- }
- }
-out:
- mutex_unlock(&vgpu->gvt->lock);
- return ret;
-}
-
-static int kvmgt_guest_init(struct mdev_device *mdev)
-{
- struct kvmgt_guest_info *info;
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
- struct kvm *kvm;
-
- vgpu = mdev_get_drvdata(mdev);
- if (handle_valid(vgpu->handle))
- return -EEXIST;
-
- vdev = kvmgt_vdev(vgpu);
- kvm = vdev->kvm;
- if (!kvm || kvm->mm != current->mm) {
- gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- return -ESRCH;
- }
-
- if (__kvmgt_vgpu_exist(vgpu, kvm))
- return -EEXIST;
-
- info = vzalloc(sizeof(struct kvmgt_guest_info));
- if (!info)
- return -ENOMEM;
-
- vgpu->handle = (unsigned long)info;
- info->vgpu = vgpu;
- info->kvm = kvm;
- kvm_get_kvm(info->kvm);
-
- kvmgt_protect_table_init(info);
- gvt_cache_init(vgpu);
-
- info->track_node.track_write = kvmgt_page_track_write;
- info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
- kvm_page_track_register_notifier(kvm, &info->track_node);
-
- debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
- &vdev->nr_cache_entries);
- return 0;
-}
-
-static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
-{
- debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME,
- info->vgpu->debugfs));
-
- kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
- kvm_put_kvm(info->kvm);
- kvmgt_protect_table_destroy(info);
- gvt_cache_destroy(info->vgpu);
- vfree(info);
-
- return true;
-}
-
-static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
-{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
-
- vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
-
- if (!vgpu->vdev)
- return -ENOMEM;
-
- kvmgt_vdev(vgpu)->vgpu = vgpu;
-
- return 0;
-}
-
-static void kvmgt_detach_vgpu(void *p_vgpu)
+void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
{
int i;
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- if (!vdev->region)
+ if (!vgpu->region)
return;
- for (i = 0; i < vdev->num_regions; i++)
- if (vdev->region[i].ops->release)
- vdev->region[i].ops->release(vgpu,
- &vdev->region[i]);
- vdev->num_regions = 0;
- kfree(vdev->region);
- vdev->region = NULL;
-
- kfree(vdev);
+ for (i = 0; i < vgpu->num_regions; i++)
+ if (vgpu->region[i].ops->release)
+ vgpu->region[i].ops->release(vgpu,
+ &vgpu->region[i]);
+ vgpu->num_regions = 0;
+ kfree(vgpu->region);
+ vgpu->region = NULL;
}
-static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
-{
- struct kvmgt_guest_info *info;
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
-
- if (!handle_valid(handle))
- return -ESRCH;
-
- info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
- vdev = kvmgt_vdev(vgpu);
-
- /*
- * When guest is poweroff, msi_trigger is set to NULL, but vgpu's
- * config and mmio register isn't restored to default during guest
- * poweroff. If this vgpu is still used in next vm, this vgpu's pipe
- * may be enabled, then once this vgpu is active, it will get inject
- * vblank interrupt request. But msi_trigger is null until msi is
- * enabled by guest. so if msi_trigger is null, success is still
- * returned and don't inject interrupt into guest.
- */
- if (vdev->msi_trigger == NULL)
- return 0;
-
- if (eventfd_signal(vdev->msi_trigger, 1) == 1)
- return 0;
-
- return -EFAULT;
-}
-
-static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
-{
- struct kvmgt_guest_info *info;
- kvm_pfn_t pfn;
-
- if (!handle_valid(handle))
- return INTEL_GVT_INVALID_ADDR;
-
- info = (struct kvmgt_guest_info *)handle;
-
- pfn = gfn_to_pfn(info->kvm, gfn);
- if (is_error_noslot_pfn(pfn))
- return INTEL_GVT_INVALID_ADDR;
-
- return pfn;
-}
-
-static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return -EINVAL;
- vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
- vdev = kvmgt_vdev(vgpu);
-
- mutex_lock(&vdev->cache_lock);
+ mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_gfn(vgpu, gfn);
if (!entry) {
@@ -2107,36 +1872,31 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
*dma_addr = entry->dma_addr;
}
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
return 0;
err_unmap:
gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
return ret;
}
-static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
- struct kvmgt_guest_info *info;
- struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret = 0;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return -ENODEV;
- info = (struct kvmgt_guest_info *)handle;
- vdev = kvmgt_vdev(info->vgpu);
-
- mutex_lock(&vdev->cache_lock);
- entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ mutex_lock(&vgpu->cache_lock);
+ entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_get(&entry->ref);
else
ret = -ENOMEM;
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
return ret;
}
@@ -2150,109 +1910,290 @@ static void __gvt_dma_release(struct kref *ref)
__gvt_cache_remove_entry(entry->vgpu, entry);
}
-static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
{
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return;
- vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
- vdev = kvmgt_vdev(vgpu);
-
- mutex_lock(&vdev->cache_lock);
+ mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_put(&entry->ref, __gvt_dma_release);
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
}
-static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len, bool write)
+static void init_device_info(struct intel_gvt *gvt)
{
- struct kvmgt_guest_info *info;
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
- if (!handle_valid(handle))
- return -ESRCH;
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
+ info->msi_cap_offset = pdev->msi_cap;
+}
- info = (struct kvmgt_guest_info *)handle;
+static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int id;
- return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
- gpa, buf, len, write);
+ mutex_lock(&gvt->lock);
+ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
+ (void *)&gvt->service_request)) {
+ if (vgpu->active)
+ intel_vgpu_emulate_vblank(vgpu);
+ }
+ }
+ mutex_unlock(&gvt->lock);
}
-static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len)
+static int gvt_service_thread(void *data)
{
- return kvmgt_rw_gpa(handle, gpa, buf, len, false);
+ struct intel_gvt *gvt = (struct intel_gvt *)data;
+ int ret;
+
+ gvt_dbg_core("service thread start\n");
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(gvt->service_thread_wq,
+ kthread_should_stop() || gvt->service_request);
+
+ if (kthread_should_stop())
+ break;
+
+ if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
+ continue;
+
+ intel_gvt_test_and_emulate_vblank(gvt);
+
+ if (test_bit(INTEL_GVT_REQUEST_SCHED,
+ (void *)&gvt->service_request) ||
+ test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
+ (void *)&gvt->service_request)) {
+ intel_gvt_schedule(gvt);
+ }
+ }
+
+ return 0;
}
-static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len)
+static void clean_service_thread(struct intel_gvt *gvt)
{
- return kvmgt_rw_gpa(handle, gpa, buf, len, true);
+ kthread_stop(gvt->service_thread);
}
-static unsigned long kvmgt_virt_to_pfn(void *addr)
+static int init_service_thread(struct intel_gvt *gvt)
{
- return PFN_DOWN(__pa(addr));
+ init_waitqueue_head(&gvt->service_thread_wq);
+
+ gvt->service_thread = kthread_run(gvt_service_thread,
+ gvt, "gvt_service_thread");
+ if (IS_ERR(gvt->service_thread)) {
+ gvt_err("fail to start service thread.\n");
+ return PTR_ERR(gvt->service_thread);
+ }
+ return 0;
}
-static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+/**
+ * intel_gvt_clean_device - clean a GVT device
+ * @i915: i915 private
+ *
+ * This function is called at the driver unloading stage, to free the
+ * resources owned by a GVT device.
+ *
+ */
+static void intel_gvt_clean_device(struct drm_i915_private *i915)
{
- struct kvmgt_guest_info *info;
- struct kvm *kvm;
- int idx;
- bool ret;
+ struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
- if (!handle_valid(handle))
- return false;
+ if (drm_WARN_ON(&i915->drm, !gvt))
+ return;
- info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
+ mdev_unregister_device(i915->drm.dev);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_clean_vgpu_types(gvt);
- idx = srcu_read_lock(&kvm->srcu);
- ret = kvm_is_visible_gfn(kvm, gfn);
- srcu_read_unlock(&kvm->srcu, idx);
+ intel_gvt_debugfs_clean(gvt);
+ clean_service_thread(gvt);
+ intel_gvt_clean_cmd_parser(gvt);
+ intel_gvt_clean_sched_policy(gvt);
+ intel_gvt_clean_workload_scheduler(gvt);
+ intel_gvt_clean_gtt(gvt);
+ intel_gvt_free_firmware(gvt);
+ intel_gvt_clean_mmio_info(gvt);
+ idr_destroy(&gvt->vgpu_idr);
+
+ kfree(i915->gvt);
+}
+
+/**
+ * intel_gvt_init_device - initialize a GVT device
+ * @i915: drm i915 private data
+ *
+ * This function is called at the initialization stage, to initialize
+ * necessary GVT components.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+static int intel_gvt_init_device(struct drm_i915_private *i915)
+{
+ struct intel_gvt *gvt;
+ struct intel_vgpu *vgpu;
+ int ret;
+
+ if (drm_WARN_ON(&i915->drm, i915->gvt))
+ return -EEXIST;
+
+ gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
+ if (!gvt)
+ return -ENOMEM;
+
+ gvt_dbg_core("init gvt device\n");
+
+ idr_init_base(&gvt->vgpu_idr, 1);
+ spin_lock_init(&gvt->scheduler.mmio_context_lock);
+ mutex_init(&gvt->lock);
+ mutex_init(&gvt->sched_lock);
+ gvt->gt = to_gt(i915);
+ i915->gvt = gvt;
+
+ init_device_info(gvt);
+
+ ret = intel_gvt_setup_mmio_info(gvt);
+ if (ret)
+ goto out_clean_idr;
+
+ intel_gvt_init_engine_mmio_context(gvt);
+
+ ret = intel_gvt_load_firmware(gvt);
+ if (ret)
+ goto out_clean_mmio_info;
+
+ ret = intel_gvt_init_irq(gvt);
+ if (ret)
+ goto out_free_firmware;
+
+ ret = intel_gvt_init_gtt(gvt);
+ if (ret)
+ goto out_free_firmware;
+ ret = intel_gvt_init_workload_scheduler(gvt);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_gvt_init_sched_policy(gvt);
+ if (ret)
+ goto out_clean_workload_scheduler;
+
+ ret = intel_gvt_init_cmd_parser(gvt);
+ if (ret)
+ goto out_clean_sched_policy;
+
+ ret = init_service_thread(gvt);
+ if (ret)
+ goto out_clean_cmd_parser;
+
+ ret = intel_gvt_init_vgpu_types(gvt);
+ if (ret)
+ goto out_clean_thread;
+
+ vgpu = intel_gvt_create_idle_vgpu(gvt);
+ if (IS_ERR(vgpu)) {
+ ret = PTR_ERR(vgpu);
+ gvt_err("failed to create idle vgpu\n");
+ goto out_clean_types;
+ }
+ gvt->idle_vgpu = vgpu;
+
+ intel_gvt_debugfs_init(gvt);
+
+ ret = intel_gvt_init_vgpu_type_groups(gvt);
+ if (ret)
+ goto out_destroy_idle_vgpu;
+
+ ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver);
+ if (ret)
+ goto out_cleanup_vgpu_type_groups;
+
+ gvt_dbg_core("gvt device initialization is done\n");
+ return 0;
+
+out_cleanup_vgpu_type_groups:
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+out_destroy_idle_vgpu:
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_debugfs_clean(gvt);
+out_clean_types:
+ intel_gvt_clean_vgpu_types(gvt);
+out_clean_thread:
+ clean_service_thread(gvt);
+out_clean_cmd_parser:
+ intel_gvt_clean_cmd_parser(gvt);
+out_clean_sched_policy:
+ intel_gvt_clean_sched_policy(gvt);
+out_clean_workload_scheduler:
+ intel_gvt_clean_workload_scheduler(gvt);
+out_clean_gtt:
+ intel_gvt_clean_gtt(gvt);
+out_free_firmware:
+ intel_gvt_free_firmware(gvt);
+out_clean_mmio_info:
+ intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+ idr_destroy(&gvt->vgpu_idr);
+ kfree(gvt);
+ i915->gvt = NULL;
return ret;
}
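
The init path above is the standard kernel cleanup-ladder idiom: every component that comes up successfully gains a matching unwind label, and a later failure jumps to the label that tears down everything acquired so far, in reverse order, keeping intel_gvt_clean_device() and the ladder mirror images of each other. A minimal sketch of the idiom, using hypothetical init_foo()/init_bar() helpers rather than the real GVT components:

    /* Cleanup-ladder sketch; foo and bar are placeholders. */
    static int example_init(struct example *e)
    {
            int ret;

            ret = init_foo(e);
            if (ret)
                    return ret;             /* nothing to unwind yet */

            ret = init_bar(e);
            if (ret)
                    goto out_clean_foo;     /* unwind in reverse order */

            return 0;

    out_clean_foo:
            clean_foo(e);
            return ret;
    }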
-static const struct intel_gvt_mpt kvmgt_mpt = {
- .type = INTEL_GVT_HYPERVISOR_KVM,
- .host_init = kvmgt_host_init,
- .host_exit = kvmgt_host_exit,
- .attach_vgpu = kvmgt_attach_vgpu,
- .detach_vgpu = kvmgt_detach_vgpu,
- .inject_msi = kvmgt_inject_msi,
- .from_virt_to_mfn = kvmgt_virt_to_pfn,
- .enable_page_track = kvmgt_page_track_add,
- .disable_page_track = kvmgt_page_track_remove,
- .read_gpa = kvmgt_read_gpa,
- .write_gpa = kvmgt_write_gpa,
- .gfn_to_mfn = kvmgt_gfn_to_pfn,
- .dma_map_guest_page = kvmgt_dma_map_guest_page,
- .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
- .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
- .set_opregion = kvmgt_set_opregion,
- .set_edid = kvmgt_set_edid,
- .get_vfio_device = kvmgt_get_vfio_device,
- .put_vfio_device = kvmgt_put_vfio_device,
- .is_valid_gfn = kvmgt_is_valid_gfn,
+static void intel_gvt_pm_resume(struct drm_i915_private *i915)
+{
+ struct intel_gvt *gvt = i915->gvt;
+
+ intel_gvt_restore_fence(gvt);
+ intel_gvt_restore_mmio(gvt);
+ intel_gvt_restore_ggtt(gvt);
+}
+
+static const struct intel_vgpu_ops intel_gvt_vgpu_ops = {
+ .init_device = intel_gvt_init_device,
+ .clean_device = intel_gvt_clean_device,
+ .pm_resume = intel_gvt_pm_resume,
};
static int __init kvmgt_init(void)
{
- if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
- return -ENODEV;
- return 0;
+ int ret;
+
+ ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops);
+ if (ret)
+ return ret;
+
+ ret = mdev_register_driver(&intel_vgpu_mdev_driver);
+ if (ret)
+ intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
+ return ret;
}
static void __exit kvmgt_exit(void)
{
- intel_gvt_unregister_hypervisor();
+ mdev_unregister_driver(&intel_vgpu_mdev_driver);
+ intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
}
module_init(kvmgt_init);
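
With the MPT table gone, module load becomes a two-step hand-off: kvmgt_init() first publishes its intel_vgpu_ops to the i915 core via intel_gvt_set_ops(), then registers the mdev driver, clearing the ops again if registration fails. A simplified sketch of what the receiving side plausibly looks like; the real intel_gvt_set_ops() lives in i915 core code and adds locking and per-device attach details:

    /* Assumed sketch of the ops hand-off, not the verbatim i915 code. */
    static const struct intel_vgpu_ops *intel_gvt_ops;

    int intel_gvt_set_ops(const struct intel_vgpu_ops *ops)
    {
            if (intel_gvt_ops)
                    return -EBUSY;  /* only one vGPU backend at a time */
            intel_gvt_ops = ops;
            return 0;
    }

    void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
    {
            if (intel_gvt_ops == ops)
                    intel_gvt_ops = NULL;
    }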
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 5db0ef83d522..9acc00505fde 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
}
if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
- ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
+ ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes);
goto out;
}
@@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
}
if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
- ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
+ ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes);
goto out;
}
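
The intel_gvt_hypervisor_read_gpa()/intel_gvt_hypervisor_write_gpa() inlines give way to direct calls here and throughout the series. Judging purely from the call sites in this patch, the replacements keep the old shape minus the opaque handle, roughly:

    /* Signatures inferred from the call sites; the actual declarations
     * live in gvt.h with the bodies in kvmgt.c. */
    int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
                           void *buf, unsigned long len);
    int intel_gvt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
                            void *buf, unsigned long len);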
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 7c26af39fbfc..bba154e38705 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -72,7 +72,6 @@ struct intel_gvt_mmio_info {
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
-bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
deleted file mode 100644
index e6c5a792a49a..000000000000
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Eddie Dong <eddie.dong@intel.com>
- * Dexuan Cui
- * Jike Song <jike.song@intel.com>
- *
- * Contributors:
- * Zhi Wang <zhi.a.wang@intel.com>
- *
- */
-
-#ifndef _GVT_MPT_H_
-#define _GVT_MPT_H_
-
-#include "gvt.h"
-
-/**
- * DOC: Hypervisor Service APIs for GVT-g Core Logic
- *
- * This is the glue layer between specific hypervisor MPT modules and GVT-g core
- * logic. Each kind of hypervisor MPT module provides a collection of function
- * callbacks and will be attached to GVT host when the driver is loading.
- * GVT-g core logic will call these APIs to request specific services from
- * hypervisor.
- */
-
-/**
- * intel_gvt_hypervisor_host_init - init GVT-g host side
- *
- * Returns:
- * Zero on success, negative error code if failed
- */
-static inline int intel_gvt_hypervisor_host_init(struct device *dev,
- void *gvt, const void *ops)
-{
- if (!intel_gvt_host.mpt->host_init)
- return -ENODEV;
-
- return intel_gvt_host.mpt->host_init(dev, gvt, ops);
-}
-
-/**
- * intel_gvt_hypervisor_host_exit - exit GVT-g host side
- */
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
-{
- /* optional to provide */
- if (!intel_gvt_host.mpt->host_exit)
- return;
-
- intel_gvt_host.mpt->host_exit(dev, gvt);
-}
-
-/**
- * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize
- * vGPU-related state inside the hypervisor.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
-{
- /* optional to provide */
- if (!intel_gvt_host.mpt->attach_vgpu)
- return 0;
-
- return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
-}
-
-/**
- * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release
- * vGPU-related state inside the hypervisor.
- */
-static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
-{
- /* optional to provide */
- if (!intel_gvt_host.mpt->detach_vgpu)
- return;
-
- intel_gvt_host.mpt->detach_vgpu(vgpu);
-}
-
-#define MSI_CAP_CONTROL(offset) (offset + 2)
-#define MSI_CAP_ADDRESS(offset) (offset + 4)
-#define MSI_CAP_DATA(offset) (offset + 8)
-#define MSI_CAP_EN 0x1
-
-/**
- * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
-{
- unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
- u16 control, data;
- u32 addr;
- int ret;
-
- control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
- addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
- data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
-
- /* Do not generate MSI if MSIEN is disabled */
- if (!(control & MSI_CAP_EN))
- return 0;
-
- if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
- return -EINVAL;
-
- trace_inject_msi(vgpu->id, addr, data);
-
- ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
- if (ret)
- return ret;
- return 0;
-}
-
-/**
- * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
- * @p: host kernel virtual address
- *
- * Returns:
- * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
- */
-static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
-{
- return intel_gvt_host.mpt->from_virt_to_mfn(p);
-}
-
-/**
- * intel_gvt_hypervisor_enable_page_track - track a guest page
- * @vgpu: a vGPU
- * @gfn: the gfn of guest
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_enable_page_track(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
-}
-
-/**
- * intel_gvt_hypervisor_disable_page_track - untrack a guest page
- * @vgpu: a vGPU
- * @gfn: the gfn of guest
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_disable_page_track(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
-}
-
-/**
- * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
- * @vgpu: a vGPU
- * @gpa: guest physical address
- * @buf: host data buffer
- * @len: data length
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
- unsigned long gpa, void *buf, unsigned long len)
-{
- return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
-}
-
-/**
- * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
- * @vgpu: a vGPU
- * @gpa: guest physical address
- * @buf: host data buffer
- * @len: data length
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
- unsigned long gpa, void *buf, unsigned long len)
-{
- return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
-}
-
-/**
- * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
- * @vgpu: a vGPU
- * @gpfn: guest pfn
- *
- * Returns:
- * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
- */
-static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
-}
-
-/**
- * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
- * @vgpu: a vGPU
- * @gfn: guest pfn
- * @size: page size
- * @dma_addr: retrieve allocated dma addr
- *
- * Returns:
- * 0 on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_dma_map_guest_page(
- struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
- dma_addr_t *dma_addr)
-{
- return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
- dma_addr);
-}
-
-/**
- * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
- * @vgpu: a vGPU
- * @dma_addr: the mapped dma addr
- */
-static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
- struct intel_vgpu *vgpu, dma_addr_t dma_addr)
-{
- intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
-}
-
-/**
- * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
- * @vgpu: a vGPU
- * @dma_addr: guest dma addr
- *
- * Returns:
- * 0 on success, negative error code if failed.
- */
-static inline int
-intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
- dma_addr_t dma_addr)
-{
- return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
-}
-
-/**
- * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
- * @vgpu: a vGPU
- * @gfn: guest PFN
- * @mfn: host PFN
- * @nr: amount of PFNs
- * @map: map or unmap
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
- struct intel_vgpu *vgpu, unsigned long gfn,
- unsigned long mfn, unsigned int nr,
- bool map)
-{
- /* an MPT implementation could have MMIO mapped elsewhere */
- if (!intel_gvt_host.mpt->map_gfn_to_mfn)
- return 0;
-
- return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
- map);
-}
-
-/**
- * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
- * @vgpu: a vGPU
- * @start: the beginning of the guest physical address region
- * @end: the end of the guest physical address region
- * @map: map or unmap
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_set_trap_area(
- struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
-{
- /* an MPT implementation could have MMIO trapped elsewhere */
- if (!intel_gvt_host.mpt->set_trap_area)
- return 0;
-
- return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
-}
-
-/**
- * intel_gvt_hypervisor_set_opregion - Set opregion for guest
- * @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
-{
- if (!intel_gvt_host.mpt->set_opregion)
- return 0;
-
- return intel_gvt_host.mpt->set_opregion(vgpu);
-}
-
-/**
- * intel_gvt_hypervisor_set_edid - Set EDID region for guest
- * @vgpu: a vGPU
- * @port_num: display port number
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
- int port_num)
-{
- if (!intel_gvt_host.mpt->set_edid)
- return 0;
-
- return intel_gvt_host.mpt->set_edid(vgpu, port_num);
-}
-
-/**
- * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
- * @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
-{
- if (!intel_gvt_host.mpt->get_vfio_device)
- return 0;
-
- return intel_gvt_host.mpt->get_vfio_device(vgpu);
-}
-
-/**
- * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
- * @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
-{
- if (!intel_gvt_host.mpt->put_vfio_device)
- return;
-
- intel_gvt_host.mpt->put_vfio_device(vgpu);
-}
-
-/**
- * intel_gvt_hypervisor_is_valid_gfn - check if a gfn is visible
- * @vgpu: a vGPU
- * @gfn: guest PFN
- *
- * Returns:
- * true if the gfn is valid, false otherwise.
- */
-static inline bool intel_gvt_hypervisor_is_valid_gfn(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- if (!intel_gvt_host.mpt->is_valid_gfn)
- return true;
-
- return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
-}
-
-int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *);
-void intel_gvt_unregister_hypervisor(void);
-
-#endif /* _GVT_MPT_H_ */
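
Nearly every wrapper in the deleted header followed one shape: an optional-callback check, then an indirect call through intel_gvt_host.mpt with the vGPU's opaque handle. Condensed:

    /* The recurring MPT wrapper shape; some_service is a placeholder. */
    static inline int intel_gvt_hypervisor_some_service(struct intel_vgpu *vgpu)
    {
            /* optional to provide */
            if (!intel_gvt_host.mpt->some_service)
                    return 0;

            return intel_gvt_host.mpt->some_service(vgpu->handle);
    }

Removing the layer turns one function-pointer dispatch per service into a direct call into kvmgt.c, and lets the opaque handle disappear from the vGPU entirely (see the vgpu.c hunks below).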
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 33569b910ed5..d2bed466540a 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -255,33 +255,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
return 0;
}
-static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
-{
- u64 mfn;
- int i, ret;
-
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
- mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
- + i * PAGE_SIZE);
- if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("fail to get MFN from VA\n");
- return -EINVAL;
- }
- ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
- vgpu_opregion(vgpu)->gfn[i],
- mfn, 1, map);
- if (ret) {
- gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
- ret);
- return ret;
- }
- }
-
- vgpu_opregion(vgpu)->mapped = map;
-
- return 0;
-}
-
/**
* intel_vgpu_opregion_base_write_handler - Opregion base register write handler
*
@@ -294,34 +267,13 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
- int i, ret = 0;
+ int i;
gvt_dbg_core("emulate opregion from kernel\n");
- switch (intel_gvt_host.hypervisor_type) {
- case INTEL_GVT_HYPERVISOR_KVM:
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
- vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
- break;
- case INTEL_GVT_HYPERVISOR_XEN:
- /**
- * Windows guests on XenGT will write this register twice: Xen
- * hvmloader and the Windows graphics driver.
- */
- if (vgpu_opregion(vgpu)->mapped)
- map_vgpu_opregion(vgpu, false);
-
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
- vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
-
- ret = map_vgpu_opregion(vgpu, true);
- break;
- default:
- ret = -EINVAL;
- gvt_vgpu_err("not supported hypervisor\n");
- }
-
- return ret;
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+ return 0;
}
/**
@@ -336,12 +288,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (!vgpu_opregion(vgpu)->va)
return;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
- if (vgpu_opregion(vgpu)->mapped)
- map_vgpu_opregion(vgpu, false);
- } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- /* Guest opregion is released by VFIO */
- }
+ /* Guest opregion is released by VFIO */
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
get_order(INTEL_GVT_OPREGION_SIZE));
@@ -470,39 +417,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
u64 scic_pa = 0, parm_pa = 0;
int ret;
- switch (intel_gvt_host.hypervisor_type) {
- case INTEL_GVT_HYPERVISOR_XEN:
- scic = *((u32 *)vgpu_opregion(vgpu)->va +
- INTEL_GVT_OPREGION_SCIC);
- parm = *((u32 *)vgpu_opregion(vgpu)->va +
- INTEL_GVT_OPREGION_PARM);
- break;
- case INTEL_GVT_HYPERVISOR_KVM:
- scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
- INTEL_GVT_OPREGION_SCIC;
- parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
- INTEL_GVT_OPREGION_PARM;
-
- ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
- &scic, sizeof(scic));
- if (ret) {
- gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
- ret, scic_pa, sizeof(scic));
- return ret;
- }
-
- ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
- &parm, sizeof(parm));
- if (ret) {
- gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
- ret, scic_pa, sizeof(scic));
- return ret;
- }
+ scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+ INTEL_GVT_OPREGION_SCIC;
+ parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+ INTEL_GVT_OPREGION_PARM;
+ ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
+ if (ret) {
+ gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+ ret, scic_pa, sizeof(scic));
+ return ret;
+ }
- break;
- default:
- gvt_vgpu_err("not supported hypervisor\n");
- return -EINVAL;
+ ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm));
+ if (ret) {
+ gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+ ret, parm_pa, sizeof(parm));
+ return ret;
}
if (!(swsci & SWSCI_SCI_SELECT)) {
@@ -535,34 +465,18 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
parm = 0;
out:
- switch (intel_gvt_host.hypervisor_type) {
- case INTEL_GVT_HYPERVISOR_XEN:
- *((u32 *)vgpu_opregion(vgpu)->va +
- INTEL_GVT_OPREGION_SCIC) = scic;
- *((u32 *)vgpu_opregion(vgpu)->va +
- INTEL_GVT_OPREGION_PARM) = parm;
- break;
- case INTEL_GVT_HYPERVISOR_KVM:
- ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
- &scic, sizeof(scic));
- if (ret) {
- gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
- ret, scic_pa, sizeof(scic));
- return ret;
- }
-
- ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
- &parm, sizeof(parm));
- if (ret) {
- gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
- ret, scic_pa, sizeof(scic));
- return ret;
- }
+ ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic));
+ if (ret) {
+ gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+ ret, scic_pa, sizeof(scic));
+ return ret;
+ }
- break;
- default:
- gvt_vgpu_err("not supported hypervisor\n");
- return -EINVAL;
+ ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm));
+ if (ret) {
+ gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+ ret, parm_pa, sizeof(parm));
+ return ret;
}
return 0;
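
With the Xen branches removed, SWSCI emulation always works on guest physical addresses: SCIC and PARM sit at fixed offsets inside the first opregion page, so both GPAs derive from gfn[0] exactly as computed above:

    /* GPA arithmetic used above: page frame number to byte address,
     * plus the register's offset within the opregion. */
    scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
              INTEL_GVT_OPREGION_SCIC;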
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 84856022528e..3375b51c75f1 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -87,7 +87,7 @@ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
track = radix_tree_delete(&vgpu->page_track_tree, gfn);
if (track) {
if (track->tracked)
- intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+ intel_gvt_page_track_remove(vgpu, gfn);
kfree(track);
}
}
@@ -112,7 +112,7 @@ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
if (track->tracked)
return 0;
- ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
+ ret = intel_gvt_page_track_add(vgpu, gfn);
if (ret)
return ret;
track->tracked = true;
@@ -139,7 +139,7 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
if (!track->tracked)
return 0;
- ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+ ret = intel_gvt_page_track_remove(vgpu, gfn);
if (ret)
return ret;
track->tracked = false;
@@ -172,7 +172,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent future traps. */
- intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
+ intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 7d666d34f9ff..d8216c63c39a 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -132,6 +132,13 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
-
#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4)
+
+/* XXX FIXME i915 has changed PP_XXX definition */
+#define PCH_PP_STATUS _MMIO(0xc7200)
+#define PCH_PP_CONTROL _MMIO(0xc7204)
+#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
+#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
+#define PCH_PP_DIVISOR _MMIO(0xc7210)
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 679476da0640..d6fe94cd0fdb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -150,10 +150,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
- intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
- intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val),\
&shadow_ring_context->name.val, 4);\
shadow_ring_context->name.val |= 0xffff << 16;\
@@ -167,7 +167,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
} else if (workload->engine->id == BCS0)
- intel_gvt_hypervisor_read_gpa(vgpu,
+ intel_gvt_read_gpa(vgpu,
workload->ring_context_gpa +
BCS_TILE_REGISTER_VAL_OFFSET,
(void *)shadow_ring_context +
@@ -178,7 +178,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
/* don't copy Ring Context (the first 0x50 dwords),
* only copy the Engine Context part from guest
*/
- intel_gvt_hypervisor_read_gpa(vgpu,
+ intel_gvt_read_gpa(vgpu,
workload->ring_context_gpa +
RING_CTX_SIZE,
(void *)shadow_ring_context +
@@ -245,7 +245,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
continue;
read:
- intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+ intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size);
gpa_base = context_gpa;
gpa_size = I915_GTT_PAGE_SIZE;
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
@@ -911,8 +911,7 @@ static void update_guest_pdps(struct intel_vgpu *vgpu,
gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
- intel_gvt_hypervisor_write_gpa(vgpu,
- gpa + i * 8, &pdp[7 - i], 4);
+ intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4);
}
static __maybe_unused bool
@@ -1007,13 +1006,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
continue;
write:
- intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+ intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size);
gpa_base = context_gpa;
gpa_size = I915_GTT_PAGE_SIZE;
src = context_base + (i << I915_GTT_PAGE_SHIFT);
}
- intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
+ intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
shadow_ring_context = (void *) ctx->lrc_reg_state;
@@ -1028,7 +1027,7 @@ write:
}
#define COPY_REG(name) \
- intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+ intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
@@ -1036,7 +1035,7 @@ write:
#undef COPY_REG
- intel_gvt_hypervisor_write_gpa(vgpu,
+ intel_gvt_write_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
@@ -1573,7 +1572,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
- intel_gvt_hypervisor_read_gpa(vgpu,
+ intel_gvt_read_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
}
@@ -1644,10 +1643,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
return ERR_PTR(-EINVAL);
}
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_header.val), &head, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
guest_head = head;
@@ -1674,11 +1673,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
gvt_dbg_el("ring %s begin a new workload\n", engine->name);
/* record some ring buffer register values for scan and shadow */
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
if (!intel_gvt_ggtt_validate_range(vgpu, start,
@@ -1701,9 +1700,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
workload->rb_ctl = ctl;
if (engine->id == RCS0) {
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
workload->wa_ctx.indirect_ctx.guest_gma =
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 6d787750d279..020f1aa28322 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -377,7 +377,7 @@ TRACE_EVENT(render_mmio,
/* This part must be out of protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
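
TRACE_INCLUDE_PATH tells define_trace.h where to find the header named by TRACE_INCLUDE_FILE when it re-includes it to expand the event definitions into real code, so switching from "." to an explicit path lets that expansion work regardless of the including directory. The expansion is typically triggered once, from a single .c file:

    /* Typical trigger (e.g. a trace_points.c): define CREATE_TRACE_POINTS
     * exactly once, then include the trace header. */
    #define CREATE_TRACE_POINTS
    #include "trace.h"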
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 8dddd0a940a1..46da19b3225d 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -293,7 +293,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_clean_gtt(vgpu);
- intel_gvt_hypervisor_detach_vgpu(vgpu);
+ intel_vgpu_detach_regions(vgpu);
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
@@ -370,8 +370,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu *vgpu;
int ret;
- gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
- param->handle, param->low_gm_sz, param->high_gm_sz,
+ gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
+ param->low_gm_sz, param->high_gm_sz,
param->fence_sz);
vgpu = vzalloc(sizeof(*vgpu));
@@ -384,7 +384,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
goto out_free_vgpu;
vgpu->id = ret;
- vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
mutex_init(&vgpu->vgpu_lock);
@@ -405,13 +404,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
populate_pvinfo_page(vgpu);
- ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
- if (ret)
- goto out_clean_vgpu_resource;
-
ret = intel_vgpu_init_gtt(vgpu);
if (ret)
- goto out_detach_hypervisor_vgpu;
+ goto out_clean_vgpu_resource;
ret = intel_vgpu_init_opregion(vgpu);
if (ret)
@@ -431,14 +426,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
intel_gvt_debugfs_add_vgpu(vgpu);
- ret = intel_gvt_hypervisor_set_opregion(vgpu);
+ ret = intel_gvt_set_opregion(vgpu);
if (ret)
goto out_clean_sched_policy;
if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
- ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
+ ret = intel_gvt_set_edid(vgpu, PORT_B);
else
- ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ ret = intel_gvt_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
@@ -454,8 +449,6 @@ out_clean_opregion:
intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
-out_detach_hypervisor_vgpu:
- intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
@@ -483,7 +476,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params param;
struct intel_vgpu *vgpu;
- param.handle = 0;
param.primary = 1;
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 5f6e41636655..f93e6122f247 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -25,6 +25,8 @@
*
*/
+#include <linux/highmem.h>
+
#include <drm/drm_cache.h>
#include "gt/intel_engine.h"
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 946bbe57bfe5..94e5c29d2ee3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -28,6 +28,7 @@
#include <linux/sched/mm.h>
#include <linux/sort.h>
+#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -47,6 +48,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
+#include "i915_driver.h"
#include "i915_irq.h"
#include "i915_scheduler.h"
#include "intel_mchbar_regs.h"
@@ -307,7 +309,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES);
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
+
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -455,9 +458,11 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &to_gt(dev_priv)->rps;
- seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
- seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
- seq_printf(m, "GPU busy? %s\n", yesno(to_gt(dev_priv)->awake));
+ seq_printf(m, "RPS enabled? %s\n",
+ str_yes_no(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n",
+ str_yes_no(intel_rps_is_active(rps)));
+ seq_printf(m, "GPU busy? %s\n", str_yes_no(to_gt(dev_priv)->awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -488,11 +493,11 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
- enableddisabled(!dev_priv->power_domains.init_wakeref));
+ str_enabled_disabled(!dev_priv->power_domains.init_wakeref));
- seq_printf(m, "GPU idle: %s\n", yesno(!to_gt(dev_priv)->awake));
+ seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
- yesno(!intel_irqs_enabled(dev_priv)));
+ str_yes_no(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
seq_printf(m, "Usage count: %d\n",
atomic_read(&dev_priv->drm.dev->power.usage_count));
@@ -522,7 +527,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "GT awake? %s [%d], %llums\n",
- yesno(to_gt(i915)->awake),
+ str_yes_no(to_gt(i915)->awake),
atomic_read(&to_gt(i915)->wakeref.count),
ktime_to_ms(intel_gt_get_awake_time(to_gt(i915))));
seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
@@ -578,8 +583,9 @@ static int i915_wedged_get(void *data, u64 *val)
static int i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ intel_gt_debugfs_reset_store(to_gt(i915), val);
- return intel_gt_debugfs_reset_store(to_gt(i915), val);
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
@@ -727,15 +733,17 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
- return intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
+ return 0;
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
- return intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
+ return 0;
}
static const struct file_operations i915_forcewake_fops = {
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
index 999210b37325..297b8e4e42ee 100644
--- a/drivers/gpu/drm/i915/i915_deps.c
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -226,7 +226,7 @@ int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
struct dma_fence *fence;
dma_resv_assert_held(resv);
- dma_resv_for_each_fence(&iter, resv, true, fence) {
+ dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {
int ret = i915_deps_add_dependency(deps, fence, ctx);
if (ret)
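
This follows the dma-resv rework pulled in by this merge: dma_resv_for_each_fence() now takes an enum dma_resv_usage rather than an all-fences bool. dma_resv_usage_rw() maps an intended access onto the usage class that must be waited for; paraphrasing include/linux/dma-resv.h:

    /* Paraphrased from include/linux/dma-resv.h: a new write must wait
     * for existing readers (and writers), while a new read only waits
     * for writers. The apparent inversion is intentional. */
    static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
    {
            return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
    }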
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 62b3f332bbf5..90b0ce5051af 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -36,6 +36,7 @@
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
@@ -76,6 +77,7 @@
#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
+#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_getparam.h"
#include "i915_ioc32.h"
@@ -87,6 +89,7 @@
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
+#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
@@ -320,9 +323,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_device_info_subplatform_init(dev_priv);
intel_step_init(dev_priv);
- intel_gt_init_early(to_gt(dev_priv), dev_priv);
intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
- intel_uncore_init_early(&dev_priv->uncore, to_gt(dev_priv));
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -353,7 +354,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_wopcm_init_early(&dev_priv->wopcm);
- __intel_gt_init_early(to_gt(dev_priv), dev_priv);
+ intel_root_gt_init_early(dev_priv);
+
+ i915_drm_clients_init(&dev_priv->clients, dev_priv);
i915_gem_init_early(dev_priv);
@@ -374,7 +377,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(to_gt(dev_priv));
+ intel_gt_driver_late_release_all(dev_priv);
+ i915_drm_clients_fini(&dev_priv->clients);
intel_region_ttm_device_fini(dev_priv);
err_ttm:
vlv_suspend_cleanup(dev_priv);
@@ -393,7 +397,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(to_gt(dev_priv));
+ intel_gt_driver_late_release_all(dev_priv);
+ i915_drm_clients_fini(&dev_priv->clients);
intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
@@ -424,13 +429,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = intel_uncore_setup_mmio(&dev_priv->uncore);
- if (ret < 0)
- goto err_bridge;
-
ret = intel_uncore_init_mmio(&dev_priv->uncore);
if (ret)
- goto err_mmio;
+ return ret;
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
@@ -448,9 +449,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
err_uncore:
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
-err_mmio:
- intel_uncore_cleanup_mmio(&dev_priv->uncore);
-err_bridge:
pci_dev_put(dev_priv->bridge_dev);
return ret;
@@ -464,15 +462,9 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
- intel_uncore_cleanup_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
-static void intel_sanitize_options(struct drm_i915_private *dev_priv)
-{
- intel_gvt_sanitize_options(dev_priv);
-}
-
/**
* i915_set_dma_info - set all relevant PCI dma info as configured for the
* platform
@@ -566,8 +558,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
}
}
- intel_sanitize_options(dev_priv);
-
/* needs to be done before ggtt probe */
intel_dram_edram_detect(dev_priv);
@@ -597,7 +587,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- ret = intel_gt_probe_lmem(to_gt(dev_priv));
+ ret = intel_gt_tiles_init(dev_priv);
if (ret)
goto err_mem_regions;
@@ -752,7 +742,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
{
- drm_printf(p, "iommu: %s\n", enableddisabled(intel_vtd_active(i915)));
+ drm_printf(p, "iommu: %s\n",
+ str_enabled_disabled(i915_vtd_active(i915)));
}
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
@@ -847,10 +838,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_vgpu_detect(i915);
- ret = i915_driver_mmio_probe(i915);
+ ret = intel_gt_probe_all(i915);
if (ret < 0)
goto out_runtime_pm_put;
+ ret = i915_driver_mmio_probe(i915);
+ if (ret < 0)
+ goto out_tiles_cleanup;
+
ret = i915_driver_hw_probe(i915);
if (ret < 0)
goto out_cleanup_mmio;
@@ -907,6 +902,8 @@ out_cleanup_hw:
i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
i915_driver_mmio_release(i915);
+out_tiles_cleanup:
+ intel_gt_release_all(i915);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_driver_late_release(i915);
@@ -1010,6 +1007,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
i915_gem_context_close(file);
+ i915_drm_client_put(file_priv->client);
kfree_rcu(file_priv, rcu);
@@ -1740,6 +1738,9 @@ static const struct file_operations i915_driver_fops = {
.read = drm_read,
.compat_ioctl = i915_ioc32_compat_ioctl,
.llseek = noop_llseek,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = i915_drm_client_fdinfo,
+#endif
};
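
Hooking .show_fdinfo into the driver's file_operations is what surfaces the new per-client stats under /proc/<pid>/fdinfo/<fd>; the callback only exists when CONFIG_PROC_FS is set, hence the guard. Any character device can opt in the same way:

    /* Generic shape of the hook; example_show_fdinfo is hypothetical.
     * The callback receives the seq_file backing the fdinfo entry and
     * the struct file it describes. */
    static const struct file_operations example_fops = {
            .owner = THIS_MODULE,
    #ifdef CONFIG_PROC_FS
            .show_fdinfo = example_show_fdinfo,
    #endif
    };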
static int
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 9d11de65daaf..44ec543d92cb 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -11,6 +11,7 @@
struct pci_dev;
struct pci_device_id;
struct drm_i915_private;
+struct drm_printer;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
@@ -26,4 +27,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915);
int i915_driver_resume_switcheroo(struct drm_i915_private *i915);
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
+void
+i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);
+
#endif /* __I915_DRIVER_H__ */
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
new file mode 100644
index 000000000000..18d38cb59923
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <uapi/drm/i915_drm.h>
+
+#include <drm/drm_print.h>
+
+#include "gem/i915_gem_context.h"
+#include "i915_drm_client.h"
+#include "i915_file_private.h"
+#include "i915_gem.h"
+#include "i915_utils.h"
+
+void i915_drm_clients_init(struct i915_drm_clients *clients,
+ struct drm_i915_private *i915)
+{
+ clients->i915 = i915;
+ clients->next_id = 0;
+
+ xa_init_flags(&clients->xarray, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+}
+
+struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients)
+{
+ struct i915_drm_client *client;
+ struct xarray *xa = &clients->xarray;
+ int ret;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ xa_lock_irq(xa);
+ ret = __xa_alloc_cyclic(xa, &client->id, client, xa_limit_32b,
+ &clients->next_id, GFP_KERNEL);
+ xa_unlock_irq(xa);
+ if (ret < 0)
+ goto err;
+
+ kref_init(&client->kref);
+ spin_lock_init(&client->ctx_lock);
+ INIT_LIST_HEAD(&client->ctx_list);
+ client->clients = clients;
+
+ return client;
+
+err:
+ kfree(client);
+
+ return ERR_PTR(ret);
+}
+
+void __i915_drm_client_free(struct kref *kref)
+{
+ struct i915_drm_client *client =
+ container_of(kref, typeof(*client), kref);
+ struct xarray *xa = &client->clients->xarray;
+ unsigned long flags;
+
+ xa_lock_irqsave(xa, flags);
+ __xa_erase(xa, client->id);
+ xa_unlock_irqrestore(xa, flags);
+ kfree(client);
+}
+
+void i915_drm_clients_fini(struct i915_drm_clients *clients)
+{
+ GEM_BUG_ON(!xa_empty(&clients->xarray));
+ xa_destroy(&clients->xarray);
+}
+
+#ifdef CONFIG_PROC_FS
+static const char * const uabi_class_names[] = {
+ [I915_ENGINE_CLASS_RENDER] = "render",
+ [I915_ENGINE_CLASS_COPY] = "copy",
+ [I915_ENGINE_CLASS_VIDEO] = "video",
+ [I915_ENGINE_CLASS_VIDEO_ENHANCE] = "video-enhance",
+ [I915_ENGINE_CLASS_COMPUTE] = "compute",
+};
+
+static u64 busy_add(struct i915_gem_context *ctx, unsigned int class)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ u64 total = 0;
+
+ for_each_gem_engine(ce, rcu_dereference(ctx->engines), it) {
+ if (ce->engine->uabi_class != class)
+ continue;
+
+ total += intel_context_get_total_runtime_ns(ce);
+ }
+
+ return total;
+}
+
+static void
+show_client_class(struct seq_file *m,
+ struct i915_drm_client *client,
+ unsigned int class)
+{
+ const struct list_head *list = &client->ctx_list;
+ u64 total = atomic64_read(&client->past_runtime[class]);
+ const unsigned int capacity =
+ client->clients->i915->engine_uabi_class_count[class];
+ struct i915_gem_context *ctx;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ctx, list, client_link)
+ total += busy_add(ctx, class);
+ rcu_read_unlock();
+
+ seq_printf(m, "drm-engine-%s:\t%llu ns\n",
+ uabi_class_names[class], total);
+
+ if (capacity > 1)
+ seq_printf(m, "drm-engine-capacity-%s:\t%u\n",
+ uabi_class_names[class],
+ capacity);
+}
+
+void i915_drm_client_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct drm_file *file = f->private_data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct i915_drm_client *client = file_priv->client;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int i;
+
+ /*
+ * ******************************************************************
+ * For text output format description please see drm-usage-stats.rst!
+ * ******************************************************************
+ */
+
+ seq_printf(m, "drm-driver:\t%s\n", i915->drm.driver->name);
+ seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n",
+ pci_domain_nr(pdev->bus), pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ seq_printf(m, "drm-client-id:\t%u\n", client->id);
+
+ /*
+ * Temporarily skip showing client engine information with GuC submission till
+ * fetching engine busyness is implemented in the GuC submission backend
+ */
+ if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
+ show_client_class(m, client, i);
+}
+#endif
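
Given the seq_printf() formats above, a single client's fdinfo entry would read roughly as follows. Values are illustrative; the drm-engine-* lines are skipped on pre-gen8 hardware and under GuC submission until busyness reporting lands there, and drm-engine-capacity-* only appears for classes with more than one engine:

    drm-driver:     i915
    drm-pdev:       0000:00:02.0
    drm-client-id:  42
    drm-engine-render:      25662044495 ns
    drm-engine-copy:        0 ns
    drm-engine-video:       0 ns
    drm-engine-video-enhance:       0 ns
    drm-engine-compute:     0 ns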
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
new file mode 100644
index 000000000000..f796c5e8e060
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_DRM_CLIENT_H__
+#define __I915_DRM_CLIENT_H__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+
+#include "gt/intel_engine_types.h"
+
+#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
+
+struct drm_i915_private;
+
+struct i915_drm_clients {
+ struct drm_i915_private *i915;
+
+ struct xarray xarray;
+ u32 next_id;
+};
+
+struct i915_drm_client {
+ struct kref kref;
+
+ unsigned int id;
+
+ spinlock_t ctx_lock; /* For add/remove from ctx_list. */
+ struct list_head ctx_list; /* List of contexts belonging to client. */
+
+ struct i915_drm_clients *clients;
+
+ /**
+ * @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
+ */
+ atomic64_t past_runtime[I915_LAST_UABI_ENGINE_CLASS + 1];
+};
+
+void i915_drm_clients_init(struct i915_drm_clients *clients,
+ struct drm_i915_private *i915);
+
+static inline struct i915_drm_client *
+i915_drm_client_get(struct i915_drm_client *client)
+{
+ kref_get(&client->kref);
+ return client;
+}
+
+void __i915_drm_client_free(struct kref *kref);
+
+static inline void i915_drm_client_put(struct i915_drm_client *client)
+{
+ kref_put(&client->kref, __i915_drm_client_free);
+}
+
+struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients);
+
+#ifdef CONFIG_PROC_FS
+void i915_drm_client_fdinfo(struct seq_file *m, struct file *f);
+#endif
+
+void i915_drm_clients_fini(struct i915_drm_clients *clients);
+
+#endif /* !__I915_DRM_CLIENT_H__ */
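
Client lifetime is a plain kref: i915_drm_client_add() returns an already-referenced client, sharers take extra references with i915_drm_client_get(), and the final i915_drm_client_put() runs __i915_drm_client_free(), which unpublishes the id from the xarray before freeing. Usage mirrors the driver's open/postclose hooks; the open side below is assumed from the header, only the put matches a hunk shown above:

    /* Sketch of per-open bookkeeping with this API. */
    file_priv->client = i915_drm_client_add(&i915->clients);
    if (IS_ERR(file_priv->client))
            return PTR_ERR(file_priv->client);

    /* ... the fd lives its life ... */

    /* on postclose, as in the i915_driver.c hunk: */
    i915_drm_client_put(file_priv->client);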
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fa14da84362e..00d7eeae33bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,11 +32,6 @@
#include <uapi/drm/i915_drm.h>
-#include <asm/hypervisor.h>
-
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/intel-iommu.h>
#include <linux/pm_qos.h>
#include <drm/drm_connector.h>
@@ -66,6 +61,7 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
+#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
#include "i915_params.h"
@@ -99,6 +95,7 @@ struct intel_dpll_funcs;
struct intel_encoder;
struct intel_fbdev;
struct intel_fdi_funcs;
+struct intel_gmbus;
struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_limit;
@@ -197,30 +194,10 @@ struct drm_i915_display_funcs {
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-/*
- * HIGH_RR is the highest eDP panel refresh rate read from EDID
- * LOW_RR is the lowest eDP panel refresh rate found from EDID
- * parsing for same resolution.
- */
-enum drrs_refresh_rate_type {
- DRRS_HIGH_RR,
- DRRS_LOW_RR,
- DRRS_MAX_RR, /* RR count */
-};
-
-enum drrs_support_type {
- DRRS_NOT_SUPPORTED = 0,
- STATIC_DRRS_SUPPORT = 1,
- SEAMLESS_DRRS_SUPPORT = 2
-};
-
-struct i915_drrs {
- struct mutex mutex;
- struct delayed_work work;
- struct intel_dp *dp;
- unsigned busy_frontbuffer_bits;
- enum drrs_refresh_rate_type refresh_rate_type;
- enum drrs_support_type type;
+enum drrs_type {
+ DRRS_TYPE_NONE,
+ DRRS_TYPE_STATIC,
+ DRRS_TYPE_SEAMLESS,
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -231,16 +208,6 @@ struct i915_drrs {
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
-struct intel_gmbus {
- struct i2c_adapter adapter;
-#define GMBUS_FORCE_BIT_RETRY (1U << 31)
- u32 force_bit;
- u32 reg0;
- i915_reg_t gpio_reg;
- struct i2c_algo_bit_data bit_algo;
- struct drm_i915_private *dev_priv;
-};
-
struct i915_suspend_saved_registers {
u32 saveDSPARB;
u32 saveSWF0[16];
@@ -360,17 +327,19 @@ struct intel_vbt_data {
bool override_afc_startup;
u8 override_afc_startup_val;
- enum drrs_support_type drrs_type;
+ u8 seamless_drrs_min_refresh_rate;
+ enum drrs_type drrs_type;
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
- bool low_vswing;
- bool initialized;
int bpp;
struct edp_power_seq pps;
+ u8 drrs_msa_timing_delay;
+ bool low_vswing;
+ bool initialized;
bool hobl;
} edp;
@@ -412,6 +381,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
struct list_head display_devices;
+ struct list_head bdb_blocks;
struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -432,6 +402,9 @@ struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
u32 caps;
+ u32 *initial_mmio;
+ u8 *initial_cfg_space;
+ struct list_head entry;
};
struct i915_selftest_stash {
@@ -510,7 +483,7 @@ struct drm_i915_private {
struct intel_dmc dmc;
- struct intel_gmbus gmbus[GMBUS_NUM_PINS];
+ struct intel_gmbus *gmbus[GMBUS_NUM_PINS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
@@ -532,6 +505,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct rb_root uabi_engines;
+ unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
struct resource mch_res;
@@ -553,7 +527,6 @@ struct drm_i915_private {
struct i915_hotplug hotplug;
struct intel_fbc *fbc[I915_MAX_FBCS];
- struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -666,12 +639,6 @@ struct drm_i915_private {
struct list_head global_obj_list;
- /*
- * For reading active_pipes holding any crtc lock is
- * sufficient, for writing must hold all of them.
- */
- u8 active_pipes;
-
struct i915_frontbuffer_tracking fb_tracking;
struct intel_atomic_helper {
@@ -701,8 +668,6 @@ struct drm_i915_private {
struct i915_gpu_error gpu_error;
- struct drm_i915_gem_object *vlv_pctx;
-
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
struct work_struct fbdev_suspend_work;
@@ -723,7 +688,6 @@ struct drm_i915_private {
u32 bxt_phy_grc;
u32 suspend_count;
- bool power_domains_suspended;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
@@ -808,6 +772,14 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct intel_gt gt0;
+ /*
+ * i915->gt[0] == &i915->gt0
+ */
+#define I915_MAX_GT 4
+ struct intel_gt *gt[I915_MAX_GT];
+
+ struct kobject *sysfs_gt;
+
struct {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
@@ -825,8 +797,6 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
- u8 framestart_delay;
-
/* Window2 specifies time required to program DSB (Window2) in number of scan lines */
u8 window2_delay;
@@ -837,8 +807,16 @@ struct drm_i915_private {
bool irq_enabled;
- /* perform PHY state sanity checks? */
- bool chv_phy_assert[2];
+ union {
+ /* perform PHY state sanity checks? */
+ bool chv_phy_assert[2];
+
+ /*
+ * DG2: Mask of PHYs that were not calibrated by the firmware
+ * and should not be used.
+ */
+ u8 snps_phy_failed_calibration;
+ };
bool ipc_enabled;
@@ -846,6 +824,8 @@ struct drm_i915_private {
struct i915_pmu pmu;
+ struct i915_drm_clients clients;
+
struct i915_hdcp_comp_master *hdcp_master;
bool hdcp_comp_added;
@@ -1083,6 +1063,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG2)
+#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, INTEL_PONTEVECCHIO)
+
#define IS_DG2_G10(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
@@ -1090,9 +1072,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG2_G12(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_ADLS_RPLS(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S)
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ADLP_N(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
+#define IS_ADLP_RPLP(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1237,6 +1221,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
((gt)->info.engine_mask & \
GENMASK(first__ + count__ - 1, first__)) >> first__; \
})
+#define RCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
#define VDBOX_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
@@ -1251,6 +1237,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
+#define HAS_4TILE(dev_priv) (INTEL_INFO(dev_priv)->has_4tile)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
@@ -1329,6 +1316,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc)
+#define HAS_HECI_PXP(dev_priv) \
+ (INTEL_INFO(dev_priv)->has_heci_pxp)
+
+#define HAS_HECI_GSCFI(dev_priv) \
+ (INTEL_INFO(dev_priv)->has_heci_gscfi)
+
+#define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv))
+
#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
@@ -1398,42 +1393,13 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GUC_DEPRIVILEGE(dev_priv) \
(INTEL_INFO(dev_priv)->has_guc_deprivilege)
-static inline bool run_as_guest(void)
-{
- return !hypervisor_is_type(X86_HYPER_NATIVE);
-}
+#define HAS_PERCTX_PREEMPT_CTRL(i915) \
+ ((GRAPHICS_VER(i915) >= 9) && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
IS_ALDERLAKE_S(dev_priv))
-static inline bool intel_vtd_active(struct drm_i915_private *i915)
-{
- if (device_iommu_mapped(i915->drm.dev))
- return true;
-
- /* Running as a guest, we assume the host is enforcing VT'd */
- return run_as_guest();
-}
-
-void
-i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);
-
-static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv);
-}
-
-static inline bool
-intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
-{
- return IS_BROXTON(i915) && intel_vtd_active(i915);
-}
-
-static inline bool
-intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
-{
- return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
-}
+#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))
/* i915_gem.c */
void i915_gem_init_early(struct drm_i915_private *dev_priv);
@@ -1508,15 +1474,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-/* i915_gem_tiling.c */
-static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- i915_gem_object_is_tiled(obj);
-}
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
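
The IS_*()/IS_SUBPLATFORM() and HAS_*() churn above follows one pattern: a platform enum plus per-platform subplatform bits, both tested against static device info. Below is a minimal stand-alone sketch of that pattern; the enum values and struct layout are simplified assumptions, not the driver's real definitions. It shows why RPL-S and RPL-P can share a single RPL-style subplatform bit: the bit is always qualified by the parent platform.

#include <stdbool.h>
#include <stdio.h>

enum platform { PLAT_ALDERLAKE_S, PLAT_ALDERLAKE_P };
enum subplatform_bit { SUBPLAT_RPL = 1u << 0, SUBPLAT_N = 1u << 1 };

struct device_info {
	enum platform platform;
	unsigned int subplatform_bits;
};

static bool is_subplatform(const struct device_info *info,
			   enum platform p, unsigned int bit)
{
	return info->platform == p && (info->subplatform_bits & bit);
}

int main(void)
{
	/* one RPL bit, disambiguated by the parent platform */
	struct device_info rpl_s = { PLAT_ALDERLAKE_S, SUBPLAT_RPL };
	struct device_info rpl_p = { PLAT_ALDERLAKE_P, SUBPLAT_RPL };

	printf("ADLS_RPLS: %d\n",
	       is_subplatform(&rpl_s, PLAT_ALDERLAKE_S, SUBPLAT_RPL));
	printf("ADLP_RPLP: %d\n",
	       is_subplatform(&rpl_p, PLAT_ALDERLAKE_P, SUBPLAT_RPL));
	return 0;
}
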
diff --git a/drivers/gpu/drm/i915/i915_file_private.h b/drivers/gpu/drm/i915/i915_file_private.h
index fb16cc431b2a..f42877869692 100644
--- a/drivers/gpu/drm/i915/i915_file_private.h
+++ b/drivers/gpu/drm/i915/i915_file_private.h
@@ -12,6 +12,7 @@
struct drm_i915_private;
struct drm_file;
+struct i915_drm_client;
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
@@ -103,6 +104,8 @@ struct drm_i915_file_private {
/** ban_score: Accumulated score of all ctx bans and fast hangs. */
atomic_t ban_score;
unsigned long hang_timestamp;
+
+ struct i915_drm_client *client;
};
#endif /* __I915_FILE_PRIVATE_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2e10187cd0a0..702e5b89be22 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -118,6 +118,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags)
{
struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
+ bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
LIST_HEAD(still_in_list);
intel_wakeref_t wakeref;
struct i915_vma *vma;
@@ -142,8 +143,6 @@ try_again:
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
- struct i915_address_space *vm = vma->vm;
-
list_move_tail(&vma->obj_link, &still_in_list);
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
continue;
@@ -153,40 +152,44 @@ try_again:
break;
}
+ /*
+ * Requiring the vm destructor to take the object lock
+ * before destroying a vma would let us eliminate the
+ * i915_vm_tryget() here, and thus also the barrier logic
+ * at the end. That's an easy fix, but sleeping locks in
+ * a kthread should generally be avoided.
+ */
ret = -EAGAIN;
- if (!i915_vm_tryopen(vm))
+ if (!i915_vm_tryget(vma->vm))
break;
- /* Prevent vma being freed by i915_vma_parked as we unbind */
- vma = __i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
- if (vma) {
- bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
- ret = -EBUSY;
- if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
- assert_object_held(vma->obj);
- ret = i915_vma_unbind_async(vma, vm_trylock);
- }
+ /*
+ * Since i915_vma_parked() takes the object lock
+ * before vma destruction, it cannot race with us
+ * here and destroy the vma from under us.
+ */
- if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma))) {
- if (vm_trylock) {
- if (mutex_trylock(&vma->vm->mutex)) {
- ret = __i915_vma_unbind(vma);
- mutex_unlock(&vma->vm->mutex);
- } else {
- ret = -EBUSY;
- }
- } else {
- ret = i915_vma_unbind(vma);
+ ret = -EBUSY;
+ if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
+ assert_object_held(vma->obj);
+ ret = i915_vma_unbind_async(vma, vm_trylock);
+ }
+
+ if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))) {
+ if (vm_trylock) {
+ if (mutex_trylock(&vma->vm->mutex)) {
+ ret = __i915_vma_unbind(vma);
+ mutex_unlock(&vma->vm->mutex);
}
+ } else {
+ ret = i915_vma_unbind(vma);
}
-
- __i915_vma_put(vma);
}
- i915_vm_close(vm);
+ i915_vm_put(vma->vm);
spin_lock(&obj->vma.lock);
}
list_splice_init(&still_in_list, &obj->vma.list);
@@ -936,8 +939,19 @@ new_vma:
if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
return ERR_PTR(-ENOSPC);
+ /*
+ * If this misplaced vma is too big (i.e., at least
+ * half the size of the aperture) or hasn't been pinned
+ * mappable before, we ignore the misplacement when
+ * PIN_NONBLOCK is set in order to avoid the ping-pong
+ * issue described above. In other words, we try to
+ * avoid the costly operation of unbinding this vma
+ * from the GGTT and rebinding it back because there
+ * may not be enough space for this vma in the aperture.
+ */
if (flags & PIN_MAPPABLE &&
- vma->fence_size > ggtt->mappable_end / 2)
+ (vma->fence_size > ggtt->mappable_end / 2 ||
+ !i915_vma_is_map_and_fenceable(vma)))
return ERR_PTR(-ENOSPC);
}
@@ -1213,25 +1227,40 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
- int ret;
+ struct i915_drm_client *client;
+ int ret = -ENOMEM;
DRM_DEBUG("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
- return -ENOMEM;
+ goto err_alloc;
+
+ client = i915_drm_client_add(&i915->clients);
+ if (IS_ERR(client)) {
+ ret = PTR_ERR(client);
+ goto err_client;
+ }
file->driver_priv = file_priv;
file_priv->dev_priv = i915;
file_priv->file = file;
+ file_priv->client = client;
file_priv->bsd_engine = -1;
file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
if (ret)
- kfree(file_priv);
+ goto err_context;
+
+ return 0;
+err_context:
+ i915_drm_client_put(client);
+err_client:
+ kfree(file_priv);
+err_alloc:
return ret;
}
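
The reworked i915_gem_open() above replaces a single kfree() on failure with the kernel's usual goto-unwind ladder: set up resources in order, and on error jump to a label that tears down only what was already set up, in reverse order. A stand-alone sketch of that shape follows; client_add(), context_open() and client_put() are hypothetical stand-ins for the driver helpers.

#include <errno.h>
#include <stdlib.h>

struct file_priv { void *client; };

static void *client_add(void) { return malloc(1); }	/* may fail */
static int context_open(struct file_priv *fpriv) { (void)fpriv; return 0; }
static void client_put(void *client) { free(client); }

static int open_file(struct file_priv **out)
{
	struct file_priv *fpriv;
	void *client;
	int ret = -ENOMEM;

	fpriv = calloc(1, sizeof(*fpriv));
	if (!fpriv)
		goto err_alloc;

	client = client_add();
	if (!client)
		goto err_client;

	fpriv->client = client;

	ret = context_open(fpriv);
	if (ret)
		goto err_context;

	*out = fpriv;
	return 0;

err_context:
	client_put(client);	/* undo in reverse order of setup */
err_client:
	free(fpriv);
err_alloc:
	return ret;
}

int main(void)
{
	struct file_priv *fpriv = NULL;

	if (open_file(&fpriv))
		return 1;

	client_put(fpriv->client);
	free(fpriv);
	return 0;
}
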
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 1d042551619e..0512c66fa4f3 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,9 +28,11 @@
*/
#include <linux/ascii85.h>
+#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>
@@ -46,12 +48,14 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
+#include "gt/uc/intel_guc_capture.h"
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
+#include "i915_utils.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
@@ -508,13 +512,10 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- const u32 period = to_gt(m->i915)->clock_period_ns;
-
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
ctx->guilty, ctx->active,
- ctx->total_runtime * period,
- mul_u32_u32(ctx->avg_runtime, period));
+ ctx->total_runtime, ctx->avg_runtime);
}
static struct i915_vma_coredump *
@@ -529,8 +530,8 @@ __find_vma(struct i915_vma_coredump *vma, const char *name)
return NULL;
}
-static struct i915_vma_coredump *
-find_batch(const struct intel_engine_coredump *ee)
+struct i915_vma_coredump *
+intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
return __find_vma(ee->vma, "batch");
}
@@ -558,7 +559,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
error_print_instdone(m, ee);
- batch = find_batch(ee);
+ batch = intel_gpu_error_find_batch(ee);
if (batch) {
u64 start = batch->gtt_offset;
u64 end = start + batch->gtt_size;
@@ -593,15 +594,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " hung: %u\n", ee->hung);
- err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
err_printf(m, " ELSP[%d]:", n);
error_print_request(m, " ", &ee->execlist[n]);
}
-
- error_print_context(m, " Active context: ", &ee->context);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -613,9 +610,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static void print_error_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma)
+void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
struct page *page;
@@ -684,7 +681,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
- print_error_vma(m, NULL, error_uc->guc_log);
+ intel_gpu_error_print_vma(m, NULL, error_uc->guc_log);
}
static void err_free_sgl(struct scatterlist *sgl)
@@ -710,26 +707,33 @@ static void err_print_gt_info(struct drm_i915_error_state_buf *m,
struct drm_printer p = i915_error_printer(m);
intel_gt_info_print(&gt->info, &p);
- intel_sseu_print_topology(&gt->info.sseu, &p);
+ intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}
-static void err_print_gt(struct drm_i915_error_state_buf *m,
- struct intel_gt_coredump *gt)
+static void err_print_gt_display(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ err_printf(m, "IER: 0x%08x\n", gt->ier);
+ err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
+}
+
+static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
{
- const struct intel_engine_coredump *ee;
int i;
- err_printf(m, "GT awake: %s\n", yesno(gt->awake));
+ err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
err_printf(m, "EIR: 0x%08x\n", gt->eir);
- err_printf(m, "IER: 0x%08x\n", gt->ier);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
+
for (i = 0; i < gt->ngtier; i++)
err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
- err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
- err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
- err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
+}
- for (i = 0; i < gt->nfence; i++)
- err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
+static void err_print_gt_global(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
err_printf(m, "ERROR: 0x%08x\n", gt->error);
@@ -752,7 +756,7 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
if (GRAPHICS_VER(m->i915) >= 12) {
int i;
- for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ for (i = 0; i < I915_MAX_SFC; i++) {
/*
* SFC_DONE resides in the VD forcewake domain, so it
* only exists if the corresponding VCS engine is
@@ -768,19 +772,38 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
}
+}
+
+static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ int i;
+
+ for (i = 0; i < gt->nfence; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
+}
+
+static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ const struct intel_engine_coredump *ee;
for (ee = gt->engine; ee; ee = ee->next) {
const struct i915_vma_coredump *vma;
- error_print_engine(m, ee);
+ if (ee->guc_capture_node)
+ intel_guc_capture_print_engine_node(m, ee);
+ else
+ error_print_engine(m, ee);
+
+ err_printf(m, " hung: %u\n", ee->hung);
+ err_printf(m, " engine reset count: %u\n", ee->reset_count);
+ error_print_context(m, " Active context: ", &ee->context);
+
for (vma = ee->vma; vma; vma = vma->next)
- print_error_vma(m, ee->engine, vma);
+ intel_gpu_error_print_vma(m, ee->engine, vma);
}
- if (gt->uc)
- err_print_uc(m, gt->uc);
-
- err_print_gt_info(m, gt);
}
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
@@ -823,21 +846,35 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_DMC(m->i915)) {
- struct intel_dmc *dmc = &m->i915->dmc;
+ intel_dmc_print_error_state(m, m->i915);
- err_printf(m, "DMC loaded: %s\n",
- yesno(intel_dmc_has_payload(m->i915) != 0));
- err_printf(m, "DMC fw version: %d.%d\n",
- DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
- }
+ err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
+ err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));
- err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
- err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
+ if (error->gt) {
+ bool print_guc_capture = false;
- if (error->gt)
- err_print_gt(m, error->gt);
+ if (error->gt->uc && error->gt->uc->is_guc_capture)
+ print_guc_capture = true;
+
+ err_print_gt_display(m, error->gt);
+ err_print_gt_global_nonguc(m, error->gt);
+ err_print_gt_fences(m, error->gt);
+
+ /*
+ * GuC dumps global, engine-class and engine-instance registers
+ * together as part of the engine state dump, so we print them in
+ * err_print_gt_engines().
+ */
+ if (!print_guc_capture)
+ err_print_gt_global(m, error->gt);
+
+ err_print_gt_engines(m, error->gt);
+
+ if (error->gt->uc)
+ err_print_uc(m, error->gt->uc);
+
+ err_print_gt_info(m, error->gt);
+ }
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -985,6 +1022,7 @@ static void cleanup_gt(struct intel_gt_coredump *gt)
gt->engine = ee->next;
i915_vma_coredump_free(ee->vma);
+ intel_guc_capture_free_node(ee);
kfree(ee);
}
@@ -1318,8 +1356,8 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
- e->total_runtime = rq->context->runtime.total;
- e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
+ e->total_runtime = intel_context_get_total_runtime_ns(rq->context);
+ e->avg_runtime = intel_context_get_avg_runtime_ns(rq->context);
simulated = i915_gem_context_no_error_capture(ctx);
@@ -1436,7 +1474,7 @@ static void add_vma_coredump(struct intel_engine_coredump *ee,
}
struct intel_engine_coredump *
-intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
struct intel_engine_coredump *ee;
@@ -1446,8 +1484,10 @@ intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
ee->engine = engine;
- engine_record_registers(ee);
- engine_record_execlists(ee);
+ if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
+ engine_record_registers(ee);
+ engine_record_execlists(ee);
+ }
return ee;
}
@@ -1511,7 +1551,8 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
- struct i915_vma_compress *compress)
+ struct i915_vma_compress *compress,
+ u32 dump_flags)
{
struct intel_engine_capture_vma *capture = NULL;
struct intel_engine_coredump *ee;
@@ -1519,7 +1560,7 @@ capture_engine(struct intel_engine_cs *engine,
struct i915_request *rq = NULL;
unsigned long flags;
- ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
+ ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
if (!ee)
return NULL;
@@ -1552,6 +1593,8 @@ capture_engine(struct intel_engine_cs *engine,
i915_request_put(rq);
goto no_request_capture;
}
+ if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ intel_guc_capture_get_matching_node(engine->gt, ee, ce);
intel_engine_coredump_add_vma(ee, capture, compress);
i915_request_put(rq);
@@ -1566,7 +1609,8 @@ no_request_capture:
static void
gt_record_engines(struct intel_gt_coredump *gt,
intel_engine_mask_t engine_mask,
- struct i915_vma_compress *compress)
+ struct i915_vma_compress *compress,
+ u32 dump_flags)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1577,7 +1621,7 @@ gt_record_engines(struct intel_gt_coredump *gt,
/* Refill our page pool before entering atomic section */
pool_refill(&compress->pool, ALLOW_FAIL);
- ee = capture_engine(engine, compress);
+ ee = capture_engine(engine, compress, dump_flags);
if (!ee)
continue;
@@ -1585,6 +1629,8 @@ gt_record_engines(struct intel_gt_coredump *gt,
gt->simulated |= ee->simulated;
if (ee->simulated) {
+ if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ intel_guc_capture_free_node(ee);
kfree(ee);
continue;
}
@@ -1620,8 +1666,74 @@ gt_record_uc(struct intel_gt_coredump *gt,
return error_uc;
}
-/* Capture all registers which don't fit into another category. */
-static void gt_record_regs(struct intel_gt_coredump *gt)
+/* Capture display registers. */
+static void gt_record_display_regs(struct intel_gt_coredump *gt)
+{
+ struct intel_uncore *uncore = gt->_gt->uncore;
+ struct drm_i915_private *i915 = uncore->i915;
+
+ if (GRAPHICS_VER(i915) >= 6)
+ gt->derrmr = intel_uncore_read(uncore, DERRMR);
+
+ if (GRAPHICS_VER(i915) >= 8)
+ gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ else if (IS_VALLEYVIEW(i915))
+ gt->ier = intel_uncore_read(uncore, VLV_IER);
+ else if (HAS_PCH_SPLIT(i915))
+ gt->ier = intel_uncore_read(uncore, DEIER);
+ else if (GRAPHICS_VER(i915) == 2)
+ gt->ier = intel_uncore_read16(uncore, GEN2_IER);
+ else
+ gt->ier = intel_uncore_read(uncore, GEN2_IER);
+}
+
+/* Capture all other registers that GuC doesn't capture. */
+static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
+{
+ struct intel_uncore *uncore = gt->_gt->uncore;
+ struct drm_i915_private *i915 = uncore->i915;
+ int i;
+
+ if (IS_VALLEYVIEW(i915)) {
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ngtier = 1;
+ } else if (GRAPHICS_VER(i915) >= 11) {
+ gt->gtier[0] =
+ intel_uncore_read(uncore,
+ GEN11_RENDER_COPY_INTR_ENABLE);
+ gt->gtier[1] =
+ intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
+ gt->gtier[2] =
+ intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
+ gt->gtier[3] =
+ intel_uncore_read(uncore,
+ GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ gt->gtier[4] =
+ intel_uncore_read(uncore,
+ GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ gt->gtier[5] =
+ intel_uncore_read(uncore,
+ GEN11_GUNIT_CSME_INTR_ENABLE);
+ gt->ngtier = 6;
+ } else if (GRAPHICS_VER(i915) >= 8) {
+ for (i = 0; i < 4; i++)
+ gt->gtier[i] =
+ intel_uncore_read(uncore, GEN8_GT_IER(i));
+ gt->ngtier = 4;
+ } else if (HAS_PCH_SPLIT(i915)) {
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ngtier = 1;
+ }
+
+ gt->eir = intel_uncore_read(uncore, EIR);
+ gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
+}
+
+/*
+ * Capture all registers that relate to workload submission.
+ * NOTE: with GuC submission, the GuC can dump these for us when it
+ * resets an engine.
+ */
+static void gt_record_global_regs(struct intel_gt_coredump *gt)
{
struct intel_uncore *uncore = gt->_gt->uncore;
struct drm_i915_private *i915 = uncore->i915;
@@ -1637,11 +1749,8 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
*/
/* 1: Registers specific to a single generation */
- if (IS_VALLEYVIEW(i915)) {
- gt->gtier[0] = intel_uncore_read(uncore, GTIER);
- gt->ier = intel_uncore_read(uncore, VLV_IER);
+ if (IS_VALLEYVIEW(i915))
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
- }
if (GRAPHICS_VER(i915) == 7)
gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
@@ -1669,7 +1778,6 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
if (GRAPHICS_VER(i915) >= 6) {
- gt->derrmr = intel_uncore_read(uncore, DERRMR);
if (GRAPHICS_VER(i915) < 12) {
gt->error = intel_uncore_read(uncore, ERROR_GEN6);
gt->done_reg = intel_uncore_read(uncore, DONE_REG);
@@ -1689,7 +1797,7 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
if (GRAPHICS_VER(i915) >= 12) {
- for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ for (i = 0; i < I915_MAX_SFC; i++) {
/*
* SFC_DONE resides in the VD forcewake domain, so it
* only exists if the corresponding VCS engine is
@@ -1705,44 +1813,6 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
}
-
- /* 4: Everything else */
- if (GRAPHICS_VER(i915) >= 11) {
- gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- gt->gtier[0] =
- intel_uncore_read(uncore,
- GEN11_RENDER_COPY_INTR_ENABLE);
- gt->gtier[1] =
- intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
- gt->gtier[2] =
- intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
- gt->gtier[3] =
- intel_uncore_read(uncore,
- GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- gt->gtier[4] =
- intel_uncore_read(uncore,
- GEN11_CRYPTO_RSVD_INTR_ENABLE);
- gt->gtier[5] =
- intel_uncore_read(uncore,
- GEN11_GUNIT_CSME_INTR_ENABLE);
- gt->ngtier = 6;
- } else if (GRAPHICS_VER(i915) >= 8) {
- gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- for (i = 0; i < 4; i++)
- gt->gtier[i] =
- intel_uncore_read(uncore, GEN8_GT_IER(i));
- gt->ngtier = 4;
- } else if (HAS_PCH_SPLIT(i915)) {
- gt->ier = intel_uncore_read(uncore, DEIER);
- gt->gtier[0] = intel_uncore_read(uncore, GTIER);
- gt->ngtier = 1;
- } else if (GRAPHICS_VER(i915) == 2) {
- gt->ier = intel_uncore_read16(uncore, GEN2_IER);
- } else if (!IS_VALLEYVIEW(i915)) {
- gt->ier = intel_uncore_read(uncore, GEN2_IER);
- }
- gt->eir = intel_uncore_read(uncore, EIR);
- gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
static void gt_record_info(struct intel_gt_coredump *gt)
@@ -1812,7 +1882,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
error->suspended = i915->runtime_pm.suspended;
- error->iommu = intel_vtd_active(i915);
+ error->iommu = i915_vtd_active(i915);
error->reset_count = i915_reset_count(&i915->gpu_error);
error->suspend_count = i915->suspend_count;
@@ -1854,7 +1924,7 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
struct intel_gt_coredump *
-intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
struct intel_gt_coredump *gc;
@@ -1865,7 +1935,21 @@ intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
gc->_gt = gt;
gc->awake = intel_gt_pm_is_awake(gt);
- gt_record_regs(gc);
+ gt_record_display_regs(gc);
+ gt_record_global_nonguc_regs(gc);
+
+ /*
+ * GuC captures global, engine-class and engine-instance registers
+ * (which can change during execution as part of engine state)
+ * and reports all three groups together as a single set before
+ * an engine is reset for a hung context. Thus, if GuC triggered
+ * the context reset, we retrieve the register values as part of
+ * gt_record_engines.
+ */
+ if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
+ gt_record_global_regs(gc);
+
gt_record_fences(gc);
return gc;
@@ -1899,7 +1983,7 @@ void i915_vma_capture_finish(struct intel_gt_coredump *gt,
}
static struct i915_gpu_coredump *
-__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
struct drm_i915_private *i915 = gt->i915;
struct i915_gpu_coredump *error;
@@ -1913,7 +1997,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
if (!error)
return ERR_PTR(-ENOMEM);
- error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL);
+ error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
if (error->gt) {
struct i915_vma_compress *compress;
@@ -1924,11 +2008,19 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
return ERR_PTR(-ENOMEM);
}
+ if (INTEL_INFO(i915)->has_gt_uc) {
+ error->gt->uc = gt_record_uc(error->gt, compress);
+ if (error->gt->uc) {
+ if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ error->gt->uc->is_guc_capture = true;
+ else
+ GEM_BUG_ON(error->gt->uc->is_guc_capture);
+ }
+ }
+
gt_record_info(error->gt);
- gt_record_engines(error->gt, engine_mask, compress);
+ gt_record_engines(error->gt, engine_mask, compress, dump_flags);
- if (INTEL_INFO(i915)->has_gt_uc)
- error->gt->uc = gt_record_uc(error->gt, compress);
i915_vma_capture_finish(error->gt, compress);
@@ -1941,7 +2033,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
}
struct i915_gpu_coredump *
-i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
static DEFINE_MUTEX(capture_mutex);
int ret = mutex_lock_interruptible(&capture_mutex);
@@ -1950,7 +2042,7 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
if (ret)
return ERR_PTR(ret);
- dump = __i915_gpu_coredump(gt, engine_mask);
+ dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
mutex_unlock(&capture_mutex);
return dump;
@@ -1997,11 +2089,11 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
* to pick up.
*/
void i915_capture_error_state(struct intel_gt *gt,
- intel_engine_mask_t engine_mask)
+ intel_engine_mask_t engine_mask, u32 dump_flags)
{
struct i915_gpu_coredump *error;
- error = i915_gpu_coredump(gt, engine_mask);
+ error = i915_gpu_coredump(gt, engine_mask, dump_flags);
if (IS_ERR(error)) {
cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
return;
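
The thread running through this file is a new dump_flags parameter plumbed from i915_capture_error_state() down to the per-engine capture: when CORE_DUMP_FLAG_IS_GUC_CAPTURE is set, the CPU-side mmio capture of submission registers is skipped because GuC has already reported them. A minimal stand-alone model of that gating, with simplified names standing in for the driver's:

#include <stdio.h>

#define DUMP_FLAG_NONE			0x0u
#define DUMP_FLAG_IS_GUC_CAPTURE	(1u << 0)

static void record_global_regs(void)
{
	puts("CPU mmio capture of submission registers");
}

static void gt_capture(unsigned int dump_flags)
{
	/*
	 * If GuC triggered the reset it has already dumped the global,
	 * engine-class and engine-instance registers; skip the CPU-side
	 * reads and use the GuC capture node when printing engines.
	 */
	if (!(dump_flags & DUMP_FLAG_IS_GUC_CAPTURE))
		record_global_regs();
	else
		puts("defer to GuC-provided capture node");
}

int main(void)
{
	gt_capture(DUMP_FLAG_NONE);
	gt_capture(DUMP_FLAG_IS_GUC_CAPTURE);
	return 0;
}
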
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 903d838e2e63..a611abacd9c2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -53,6 +53,8 @@ struct i915_request_coredump {
struct i915_sched_attr sched_attr;
};
+struct __guc_capture_parsed_output;
+
struct intel_engine_coredump {
const struct intel_engine_cs *engine;
@@ -84,11 +86,15 @@ struct intel_engine_coredump {
u32 rc_psmi; /* sleep state */
struct intel_instdone instdone;
+ /* GuC matched capture-lists info */
+ struct intel_guc_state_capture *capture;
+ struct __guc_capture_parsed_output *guc_capture_node;
+
struct i915_gem_context_coredump {
char comm[TASK_COMM_LEN];
u64 total_runtime;
- u32 avg_runtime;
+ u64 avg_runtime;
pid_t pid;
int active;
@@ -124,7 +130,6 @@ struct intel_gt_coredump {
u32 pgtbl_er;
u32 ier;
u32 gtier[6], ngtier;
- u32 derrmr;
u32 forcewake;
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
@@ -137,9 +142,12 @@ struct intel_gt_coredump {
u32 gfx_mode;
u32 gtt_cache;
u32 aux_err; /* gen12 */
- u32 sfc_done[GEN12_SFC_DONE_MAX]; /* gen12 */
u32 gam_done; /* gen12 */
+ /* Display related */
+ u32 derrmr;
+ u32 sfc_done[I915_MAX_SFC]; /* gen12 */
+
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
@@ -149,6 +157,7 @@ struct intel_gt_coredump {
struct intel_uc_fw guc_fw;
struct intel_uc_fw huc_fw;
struct i915_vma_coredump *guc_log;
+ bool is_guc_capture;
} *uc;
struct intel_gt_coredump *next;
@@ -221,24 +230,32 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
+#define CORE_DUMP_FLAG_NONE 0x0
+#define CORE_DUMP_FLAG_IS_GUC_CAPTURE BIT(0)
+
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma);
+struct i915_vma_coredump *
+intel_gpu_error_find_batch(const struct intel_engine_coredump *ee);
struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt,
- intel_engine_mask_t engine_mask);
+ intel_engine_mask_t engine_mask, u32 dump_flags);
void i915_capture_error_state(struct intel_gt *gt,
- intel_engine_mask_t engine_mask);
+ intel_engine_mask_t engine_mask, u32 dump_flags);
struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp);
struct intel_gt_coredump *
-intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp);
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags);
struct intel_engine_coredump *
-intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp);
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags);
struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
@@ -281,8 +298,14 @@ void i915_disable_error_state(struct drm_i915_private *i915, int err);
#else
+__printf(2, 3)
+static inline void
+i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+}
+
static inline void
-i915_capture_error_state(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+i915_capture_error_state(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
}
@@ -293,13 +316,13 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
}
static inline struct intel_gt_coredump *
-intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
return NULL;
}
static inline struct intel_engine_coredump *
-intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
return NULL;
}
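
The #else branch above shows the standard Kconfig stub pattern: when CONFIG_DRM_I915_CAPTURE_ERROR is disabled, static inline no-ops with matching signatures keep every caller compiling without #ifdefs at each call site. A stand-alone model, with a plain #ifdef standing in for the kernel's IS_ENABLED() machinery and a simplified function name:

#include <stdio.h>

/* #define WITH_ERROR_CAPTURE 1 */

#ifdef WITH_ERROR_CAPTURE
void capture_error_state(unsigned int engine_mask, unsigned int dump_flags);
#else
static inline void capture_error_state(unsigned int engine_mask,
				       unsigned int dump_flags)
{
	(void)engine_mask;
	(void)dump_flags;	/* compiled-out: intentionally a no-op */
}
#endif

int main(void)
{
	capture_error_state(~0u, 0);	/* links even with capture disabled */
	puts("ok");
	return 0;
}
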
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index eea355c2fc28..701fbc98afa0 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -22,6 +22,8 @@
* IN THE SOFTWARE.
*/
+#include <linux/string_helpers.h>
+
#include <drm/drm_print.h>
#include "i915_params.h"
@@ -94,7 +96,7 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0400,
i915_param_named_unsafe(enable_psr, int, 0400,
"Enable PSR "
- "(0=disabled, 1=enabled) "
+ "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
"Default: -1 (use per-chip default)");
i915_param_named(psr_safest_params, bool, 0400,
@@ -200,13 +202,17 @@ i915_param_named_unsafe(request_timeout_ms, uint, 0600,
"Default request/fence/batch buffer expiration timeout.");
#endif
+i915_param_named_unsafe(lmem_size, uint, 0400,
+ "Set the lmem size(in MiB) for each region. (default: 0, all memory)");
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
const void *x)
{
if (!__builtin_strcmp(type, "bool"))
- drm_printf(p, "i915.%s=%s\n", name, yesno(*(const bool *)x));
+ drm_printf(p, "i915.%s=%s\n", name,
+ str_yes_no(*(const bool *)x));
else if (!__builtin_strcmp(type, "int"))
drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
else if (!__builtin_strcmp(type, "unsigned int"))
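
_print_param() above relies on __builtin_strcmp() against the stringified parameter type: with string-literal arguments the comparison folds to a constant, so each instantiation keeps only one branch. A stand-alone sketch of the same dispatch, with the driver's str_yes_no() helper modeled by a ternary:

#include <stdbool.h>
#include <stdio.h>

/* type is always a string literal here, so the strcmp folds at compile time */
static void print_param(const char *name, const char *type, const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		printf("i915.%s=%s\n", name, *(const bool *)x ? "yes" : "no");
	else if (!__builtin_strcmp(type, "int"))
		printf("i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		printf("i915.%s=%u\n", name, *(const unsigned int *)x);
}

int main(void)
{
	bool hangcheck = true;
	unsigned int lmem_size = 0;

	print_param("enable_hangcheck", "bool", &hangcheck);
	print_param("lmem_size", "unsigned int", &lmem_size);
	return 0;
}
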
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index c779a6f85c7e..b5e7ea45d191 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -73,6 +73,7 @@ struct drm_printer;
param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
+ param(unsigned int, lmem_size, 0, 0400) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
param(bool, load_detect_test, false, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index c32c0c6661c8..acf688b698c3 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -901,7 +901,8 @@ static const struct intel_device_info rkl_info = {
.has_llc = 0, \
.has_pxp = 0, \
.has_snoop = 1, \
- .is_dgfx = 1
+ .is_dgfx = 1, \
+ .has_heci_gscfi = 1
static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
@@ -1036,29 +1037,63 @@ static const struct intel_device_info xehpsdv_info = {
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
- BIT(VCS4) | BIT(VCS5) | BIT(VCS6) | BIT(VCS7),
+ BIT(VCS4) | BIT(VCS5) | BIT(VCS6) | BIT(VCS7) |
+ BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
.require_force_probe = 1,
};
-__maybe_unused
+#define DG2_FEATURES \
+ XE_HP_FEATURES, \
+ XE_HPM_FEATURES, \
+ DGFX_FEATURES, \
+ .graphics.rel = 55, \
+ .media.rel = 55, \
+ PLATFORM(INTEL_DG2), \
+ .has_4tile = 1, \
+ .has_64k_pages = 1, \
+ .has_guc_deprivilege = 1, \
+ .has_heci_pxp = 1, \
+ .needs_compact_pt = 1, \
+ .platform_engine_mask = \
+ BIT(RCS0) | BIT(BCS0) | \
+ BIT(VECS0) | BIT(VECS1) | \
+ BIT(VCS0) | BIT(VCS2) | \
+ BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3)
+
static const struct intel_device_info dg2_info = {
- XE_HP_FEATURES,
- XE_HPM_FEATURES,
+ DG2_FEATURES,
XE_LPD_FEATURES,
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
+ .require_force_probe = 1,
+};
+
+__maybe_unused
+static const struct intel_device_info ats_m_info = {
+ DG2_FEATURES,
+ .display = { 0 },
+ .require_force_probe = 1,
+};
+
+#define XE_HPC_FEATURES \
+ XE_HP_FEATURES, \
+ .dma_mask_size = 52
+
+__maybe_unused
+static const struct intel_device_info pvc_info = {
+ XE_HPC_FEATURES,
+ XE_HPM_FEATURES,
DGFX_FEATURES,
- .graphics.rel = 55,
- .media.rel = 55,
- PLATFORM(INTEL_DG2),
- .has_guc_deprivilege = 1,
- .has_64k_pages = 1,
- .needs_compact_pt = 1,
+ .graphics.rel = 60,
+ .media.rel = 60,
+ PLATFORM(INTEL_PONTEVECCHIO),
+ .display = { 0 },
+ .has_flat_ccs = 0,
.platform_engine_mask =
- BIT(RCS0) | BIT(BCS0) |
- BIT(VECS0) | BIT(VECS1) |
- BIT(VCS0) | BIT(VCS2),
+ BIT(BCS0) |
+ BIT(VCS0) |
+ BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
.require_force_probe = 1,
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
};
#undef PLATFORM
@@ -1140,6 +1175,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_ADLN_IDS(&adl_p_info),
INTEL_DG1_IDS(&dg1_info),
INTEL_RPLS_IDS(&adl_s_info),
+ INTEL_RPLP_IDS(&adl_p_info),
+ INTEL_DG2_IDS(&dg2_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
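
The DG2_FEATURES/XE_HPC_FEATURES layering above works because C designated initializers let a later initializer for the same member override an earlier one: each *_FEATURES macro expands to a comma-separated initializer list, and a platform entry then adjusts individual fields after it. A minimal stand-alone model with simplified fields:

#include <stdio.h>

struct info {
	int is_dgfx;
	int has_4tile;
	int require_force_probe;
};

#define BASE_FEATURES \
	.is_dgfx = 1, \
	.has_4tile = 0

static const struct info dg2 = {
	BASE_FEATURES,
	.has_4tile = 1,		/* overrides the BASE_FEATURES default */
	.require_force_probe = 1,
};

int main(void)
{
	printf("dgfx=%d 4tile=%d force_probe=%d\n",
	       dg2.is_dgfx, dg2.has_4tile, dg2.require_force_probe);
	return 0;
}
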
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0a9c3fcc09b1..1577ab6754db 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -4050,8 +4050,8 @@ addr_err:
return ERR_PTR(err);
}
-static ssize_t show_dynamic_id(struct device *dev,
- struct device_attribute *attr,
+static ssize_t show_dynamic_id(struct kobject *kobj,
+ struct kobj_attribute *attr,
char *buf)
{
struct i915_oa_config *oa_config =
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 473a3c0544bb..05cb9a335a97 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -55,7 +55,7 @@ struct i915_oa_config {
struct attribute_group sysfs_metric;
struct attribute *attrs[2];
- struct device_attribute sysfs_metric_id;
+ struct kobj_attribute sysfs_metric_id;
struct kref ref;
struct rcu_head rcu;
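
The device_attribute to kobj_attribute switch above matters because a sysfs show() callback must match the type of attribute it is embedded in, and the callback recovers its parent object via container_of(). A stand-alone model with simplified stand-in types (the real callback also receives the kobject as its first argument):

#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>

struct kobj_attribute_model {
	ssize_t (*show)(struct kobj_attribute_model *attr, char *buf);
};

struct oa_config_model {
	int id;
	struct kobj_attribute_model sysfs_metric_id;
};

#define container_of_model(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* show() recovers its oa_config from the embedded attribute */
static ssize_t show_dynamic_id(struct kobj_attribute_model *attr, char *buf)
{
	struct oa_config_model *oa =
		container_of_model(attr, struct oa_config_model,
				   sysfs_metric_id);

	return sprintf(buf, "%d\n", oa->id);
}

int main(void)
{
	struct oa_config_model oa = { .id = 7 };
	char buf[16];

	oa.sysfs_metric_id.show = show_dynamic_id;
	oa.sysfs_metric_id.show(&oa.sysfs_metric_id, buf);
	fputs(buf, stdout);	/* prints "7" */
	return 0;
}
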
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index cfc21042499d..3e3b09588fd3 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -148,10 +148,7 @@ static u64 __get_rc6(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
u64 val;
- val = intel_rc6_residency_ns(&gt->rc6,
- IS_VALLEYVIEW(i915) ?
- VLV_GT_RENDER_RC6 :
- GEN6_GT_GFX_RC6);
+ val = intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6);
if (HAS_RC6p(i915))
val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 2dfbc22857a3..7584cec53d5d 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
+#include "gt/intel_engine_user.h"
#include <uapi/drm/i915_drm.h>
static int copy_query_item(void *query_hdr, size_t query_sz,
@@ -28,36 +29,30 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
return 0;
}
-static int query_topology_info(struct drm_i915_private *dev_priv,
- struct drm_i915_query_item *query_item)
+static int fill_topology_info(const struct sseu_dev_info *sseu,
+ struct drm_i915_query_item *query_item,
+ const u8 *subslice_mask)
{
- const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
int ret;
- if (query_item->flags != 0)
- return -EINVAL;
+ BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
if (sseu->max_slices == 0)
return -ENODEV;
- BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
-
slice_length = sizeof(sseu->slice_mask);
subslice_length = sseu->max_slices * sseu->ss_stride;
eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
- ret = copy_query_item(&topo, sizeof(topo), total_length,
- query_item);
+ ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);
+
if (ret != 0)
return ret;
- if (topo.flags != 0)
- return -EINVAL;
-
memset(&topo, 0, sizeof(topo));
topo.max_slices = sseu->max_slices;
topo.max_subslices = sseu->max_subslices;
@@ -69,27 +64,64 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.eu_stride = sseu->eu_stride;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
- &topo, sizeof(topo)))
+ &topo, sizeof(topo)))
return -EFAULT;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
- &sseu->slice_mask, slice_length))
+ &sseu->slice_mask, slice_length))
return -EFAULT;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
- sizeof(topo) + slice_length),
- sseu->subslice_mask, subslice_length))
+ sizeof(topo) + slice_length),
+ subslice_mask, subslice_length))
return -EFAULT;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
- sizeof(topo) +
- slice_length + subslice_length),
- sseu->eu_mask, eu_length))
+ sizeof(topo) +
+ slice_length + subslice_length),
+ sseu->eu_mask, eu_length))
return -EFAULT;
return total_length;
}
+static int query_topology_info(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;
+
+ if (query_item->flags != 0)
+ return -EINVAL;
+
+ return fill_topology_info(sseu, query_item, sseu->subslice_mask);
+}
+
+static int query_geometry_subslices(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu;
+ struct intel_engine_cs *engine;
+ struct i915_engine_class_instance classinstance;
+
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ return -ENODEV;
+
+ classinstance = *((struct i915_engine_class_instance *)&query_item->flags);
+
+ engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
+ (u8)classinstance.engine_instance);
+
+ if (!engine)
+ return -EINVAL;
+
+ if (engine->class != RENDER_CLASS)
+ return -EINVAL;
+
+ sseu = &engine->gt->info.sseu;
+
+ return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
+}
+
static int
query_engine_info(struct drm_i915_private *i915,
struct drm_i915_query_item *query_item)
@@ -479,12 +511,36 @@ static int query_memregion_info(struct drm_i915_private *i915,
return total_length;
}
+static int query_hwconfig_blob(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct intel_gt *gt = to_gt(i915);
+ struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
+
+ if (!hwconfig->size || !hwconfig->ptr)
+ return -ENODEV;
+
+ if (query_item->length == 0)
+ return hwconfig->size;
+
+ if (query_item->length < hwconfig->size)
+ return -EINVAL;
+
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+ hwconfig->ptr, hwconfig->size))
+ return -EFAULT;
+
+ return hwconfig->size;
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
query_engine_info,
query_perf_config,
query_memregion_info,
+ query_hwconfig_blob,
+ query_geometry_subslices,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
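
query_hwconfig_blob() above implements the usual two-call query contract: an item with length 0 reports the blob size (written back into the item by the ioctl), and a second call with a buffer of at least that size copies the blob out. A hedged user-space sketch of that contract follows; it assumes the uapi query id introduced alongside this handler is named DRM_I915_QUERY_HWCONFIG_BLOB, and error handling is trimmed.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *read_hwconfig(int drm_fd, int32_t *size_out)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_HWCONFIG_BLOB,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	void *buf;

	/* first pass: item.length == 0 asks only for the size */
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return NULL;

	buf = malloc(item.length);
	if (!buf)
		return NULL;

	/* second pass: same item, now with a destination buffer */
	item.data_ptr = (uintptr_t)buf;
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query)) {
		free(buf);
		return NULL;
	}

	*size_out = item.length;
	return buf;
}
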
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index fe960c204362..4f5a51bb9e1e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -644,22 +644,20 @@
#define _PORT_PLL_A 0x46074
#define _PORT_PLL_B 0x46078
#define _PORT_PLL_C 0x4607c
-#define PORT_PLL_ENABLE (1 << 31)
-#define PORT_PLL_LOCK (1 << 30)
-#define PORT_PLL_REF_SEL (1 << 27)
-#define PORT_PLL_POWER_ENABLE (1 << 26)
-#define PORT_PLL_POWER_STATE (1 << 25)
+#define PORT_PLL_ENABLE REG_BIT(31)
+#define PORT_PLL_LOCK REG_BIT(30)
+#define PORT_PLL_REF_SEL REG_BIT(27)
+#define PORT_PLL_POWER_ENABLE REG_BIT(26)
+#define PORT_PLL_POWER_STATE REG_BIT(25)
#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
#define _PORT_PLL_EBB_0_A 0x162034
#define _PORT_PLL_EBB_0_B 0x6C034
#define _PORT_PLL_EBB_0_C 0x6C340
-#define PORT_PLL_P1_SHIFT 13
-#define PORT_PLL_P1_MASK (0x07 << PORT_PLL_P1_SHIFT)
-#define PORT_PLL_P1(x) ((x) << PORT_PLL_P1_SHIFT)
-#define PORT_PLL_P2_SHIFT 8
-#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
-#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
+#define PORT_PLL_P1_MASK REG_GENMASK(15, 13)
+#define PORT_PLL_P1(p1) REG_FIELD_PREP(PORT_PLL_P1_MASK, (p1))
+#define PORT_PLL_P2_MASK REG_GENMASK(12, 8)
+#define PORT_PLL_P2(p2) REG_FIELD_PREP(PORT_PLL_P2_MASK, (p2))
#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PLL_EBB_0_B, \
_PORT_PLL_EBB_0_C)
@@ -667,8 +665,8 @@
#define _PORT_PLL_EBB_4_A 0x162038
#define _PORT_PLL_EBB_4_B 0x6C038
#define _PORT_PLL_EBB_4_C 0x6C344
-#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
-#define PORT_PLL_RECALIBRATE (1 << 14)
+#define PORT_PLL_RECALIBRATE REG_BIT(14)
+#define PORT_PLL_10BIT_CLK_ENABLE REG_BIT(13)
#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
_PORT_PLL_EBB_4_B, \
_PORT_PLL_EBB_4_C)
@@ -677,31 +675,33 @@
#define _PORT_PLL_0_B 0x6C100
#define _PORT_PLL_0_C 0x6C380
/* PORT_PLL_0_A */
-#define PORT_PLL_M2_MASK 0xFF
+#define PORT_PLL_M2_INT_MASK REG_GENMASK(7, 0)
+#define PORT_PLL_M2_INT(m2_int) REG_FIELD_PREP(PORT_PLL_M2_INT_MASK, (m2_int))
/* PORT_PLL_1_A */
-#define PORT_PLL_N_SHIFT 8
-#define PORT_PLL_N_MASK (0x0F << PORT_PLL_N_SHIFT)
-#define PORT_PLL_N(x) ((x) << PORT_PLL_N_SHIFT)
+#define PORT_PLL_N_MASK REG_GENMASK(11, 8)
+#define PORT_PLL_N(n) REG_FIELD_PREP(PORT_PLL_N_MASK, (n))
/* PORT_PLL_2_A */
-#define PORT_PLL_M2_FRAC_MASK 0x3FFFFF
+#define PORT_PLL_M2_FRAC_MASK REG_GENMASK(21, 0)
+#define PORT_PLL_M2_FRAC(m2_frac) REG_FIELD_PREP(PORT_PLL_M2_FRAC_MASK, (m2_frac))
/* PORT_PLL_3_A */
-#define PORT_PLL_M2_FRAC_ENABLE (1 << 16)
+#define PORT_PLL_M2_FRAC_ENABLE REG_BIT(16)
/* PORT_PLL_6_A */
-#define PORT_PLL_PROP_COEFF_MASK 0xF
-#define PORT_PLL_INT_COEFF_MASK (0x1F << 8)
-#define PORT_PLL_INT_COEFF(x) ((x) << 8)
-#define PORT_PLL_GAIN_CTL_MASK (0x07 << 16)
-#define PORT_PLL_GAIN_CTL(x) ((x) << 16)
+#define PORT_PLL_GAIN_CTL_MASK REG_GENMASK(18, 16)
+#define PORT_PLL_GAIN_CTL(x) REG_FIELD_PREP(PORT_PLL_GAIN_CTL_MASK, (x))
+#define PORT_PLL_INT_COEFF_MASK REG_GENMASK(12, 8)
+#define PORT_PLL_INT_COEFF(x) REG_FIELD_PREP(PORT_PLL_INT_COEFF_MASK, (x))
+#define PORT_PLL_PROP_COEFF_MASK REG_GENMASK(3, 0)
+#define PORT_PLL_PROP_COEFF(x) REG_FIELD_PREP(PORT_PLL_PROP_COEFF_MASK, (x))
/* PORT_PLL_8_A */
-#define PORT_PLL_TARGET_CNT_MASK 0x3FF
+#define PORT_PLL_TARGET_CNT_MASK REG_GENMASK(9, 0)
+#define PORT_PLL_TARGET_CNT(x) REG_FIELD_PREP(PORT_PLL_TARGET_CNT_MASK, (x))
/* PORT_PLL_9_A */
-#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
-#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
+#define PORT_PLL_LOCK_THRESHOLD_MASK REG_GENMASK(3, 1)
+#define PORT_PLL_LOCK_THRESHOLD(x) REG_FIELD_PREP(PORT_PLL_LOCK_THRESHOLD_MASK, (x))
/* PORT_PLL_10_A */
-#define PORT_PLL_DCO_AMP_OVR_EN_H (1 << 27)
-#define PORT_PLL_DCO_AMP_DEFAULT 15
-#define PORT_PLL_DCO_AMP_MASK 0x3c00
-#define PORT_PLL_DCO_AMP(x) ((x) << 10)
+#define PORT_PLL_DCO_AMP_OVR_EN_H REG_BIT(27)
+#define PORT_PLL_DCO_AMP_MASK REG_GENMASK(13, 10)
+#define PORT_PLL_DCO_AMP(x) REG_FIELD_PREP(PORT_PLL_DCO_AMP_MASK, (x))
#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
@@ -976,6 +976,10 @@
#define GEN12_COMPUTE2_RING_BASE 0x1e000
#define GEN12_COMPUTE3_RING_BASE 0x26000
#define BLT_RING_BASE 0x22000
+#define DG1_GSC_HECI1_BASE 0x00258000
+#define DG1_GSC_HECI2_BASE 0x00259000
+#define DG2_GSC_HECI1_BASE 0x00373000
+#define DG2_GSC_HECI2_BASE 0x00374000
@@ -1103,16 +1107,21 @@
#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
-#define _PIPEA_MBUS_DBOX_CTL 0x7003C
-#define _PIPEB_MBUS_DBOX_CTL 0x7103C
-#define PIPE_MBUS_DBOX_CTL(pipe) _MMIO_PIPE(pipe, _PIPEA_MBUS_DBOX_CTL, \
- _PIPEB_MBUS_DBOX_CTL)
-#define MBUS_DBOX_BW_CREDIT_MASK (3 << 14)
-#define MBUS_DBOX_BW_CREDIT(x) ((x) << 14)
-#define MBUS_DBOX_B_CREDIT_MASK (0x1F << 8)
-#define MBUS_DBOX_B_CREDIT(x) ((x) << 8)
-#define MBUS_DBOX_A_CREDIT_MASK (0xF << 0)
-#define MBUS_DBOX_A_CREDIT(x) ((x) << 0)
+#define _PIPEA_MBUS_DBOX_CTL 0x7003C
+#define _PIPEB_MBUS_DBOX_CTL 0x7103C
+#define PIPE_MBUS_DBOX_CTL(pipe) _MMIO_PIPE(pipe, _PIPEA_MBUS_DBOX_CTL, \
+ _PIPEB_MBUS_DBOX_CTL)
+#define MBUS_DBOX_B2B_TRANSACTIONS_MAX_MASK REG_GENMASK(24, 20) /* tgl+ */
+#define MBUS_DBOX_B2B_TRANSACTIONS_MAX(x) REG_FIELD_PREP(MBUS_DBOX_B2B_TRANSACTIONS_MAX_MASK, x)
+#define MBUS_DBOX_B2B_TRANSACTIONS_DELAY_MASK REG_GENMASK(19, 17) /* tgl+ */
+#define MBUS_DBOX_B2B_TRANSACTIONS_DELAY(x) REG_FIELD_PREP(MBUS_DBOX_B2B_TRANSACTIONS_DELAY_MASK, x)
+#define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */
+#define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14)
+#define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x)
+#define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8)
+#define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x)
+#define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0)
+#define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x)
#define MBUS_UBOX_CTL _MMIO(0x4503C)
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
@@ -1395,6 +1404,7 @@
#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
+#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */
#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268)
@@ -1837,6 +1847,17 @@
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
+#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
+#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
+#define PROCHOT_MASK REG_BIT(1)
+#define THERMAL_LIMIT_MASK REG_BIT(2)
+#define RATL_MASK REG_BIT(6)
+#define VR_THERMALERT_MASK REG_BIT(7)
+#define VR_TDC_MASK REG_BIT(8)
+#define POWER_LIMIT_4_MASK REG_BIT(9)
+#define POWER_LIMIT_1_MASK REG_BIT(11)
+#define POWER_LIMIT_2_MASK REG_BIT(12)
+
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -3705,9 +3726,11 @@
#define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3)
#define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
#define PIPECONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
-#define PIPECONF_EDP_RR_MODE_SWITCH REG_BIT(20)
+#define PIPECONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
+#define PIPECONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
+#define PIPECONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(PIPECONF_MSA_TIMING_DELAY_MASK, (x))
#define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16)
-#define PIPECONF_EDP_RR_MODE_SWITCH_VLV REG_BIT(14)
+#define PIPECONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
#define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13)
#define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
#define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
@@ -4849,6 +4872,7 @@
#define PLANE_CTL_TILED_X REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 1)
#define PLANE_CTL_TILED_Y REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 4)
#define PLANE_CTL_TILED_YF REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 5)
+#define PLANE_CTL_TILED_4 REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 5)
#define PLANE_CTL_ASYNC_FLIP REG_BIT(9)
#define PLANE_CTL_FLIP_HORIZONTAL REG_BIT(8)
#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE REG_BIT(4) /* TGL+ */
@@ -5490,43 +5514,6 @@
#define GAMMA_MODE_MODE_SPLIT (3 << 0) /* ivb-bdw */
#define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */
-/* DMC */
-#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
-#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
-#define DMC_HTP_ADDR_SKL 0x00500034
-#define DMC_SSP_BASE _MMIO(0x8F074)
-#define DMC_HTP_SKL _MMIO(0x8F004)
-#define DMC_LAST_WRITE _MMIO(0x8F034)
-#define DMC_LAST_WRITE_VALUE 0xc003b400
-/* MMIO address range for DMC program (0x80000 - 0x82FFF) */
-#define DMC_MMIO_START_RANGE 0x80000
-#define DMC_MMIO_END_RANGE 0x8FFFF
-#define DMC_V1_MMIO_START_RANGE 0x80000
-#define TGL_MAIN_MMIO_START 0x8F000
-#define TGL_MAIN_MMIO_END 0x8FFFF
-#define _TGL_PIPEA_MMIO_START 0x92000
-#define _TGL_PIPEA_MMIO_END 0x93FFF
-#define _TGL_PIPEB_MMIO_START 0x96000
-#define _TGL_PIPEB_MMIO_END 0x97FFF
-#define ADLP_PIPE_MMIO_START 0x5F000
-#define ADLP_PIPE_MMIO_END 0x5FFFF
-
-#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
- _TGL_PIPEB_MMIO_START)
-
-#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
- _TGL_PIPEB_MMIO_END)
-
-#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
-#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
-#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
-#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
-#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
-#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
-
-#define TGL_DMC_DEBUG3 _MMIO(0x101090)
-#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
-
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
#define MMIO_TIMEOUT_US(us) ((us) << 0)
@@ -5941,6 +5928,7 @@
#define ICL_DELAY_PMRSP REG_BIT(22)
#define DISABLE_FLR_SRC REG_BIT(15)
#define MASK_WAKEMEM REG_BIT(13)
+#define DDI_CLOCK_REG_ACCESS REG_BIT(7)
#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
@@ -6735,11 +6723,18 @@
#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
#define ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8))
#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
-#define ICL_PCODE_POINTS_RESTRICTED 0x0
-#define ICL_PCODE_POINTS_RESTRICTED_MASK 0xf
-#define ADLS_PSF_PT_SHIFT 8
-#define ADLS_QGV_PT_MASK REG_GENMASK(7, 0)
-#define ADLS_PSF_PT_MASK REG_GENMASK(10, 8)
+#define ICL_PCODE_REP_QGV_MASK REG_GENMASK(1, 0)
+#define ICL_PCODE_REP_QGV_SAFE REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 0)
+#define ICL_PCODE_REP_QGV_POLL REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 1)
+#define ICL_PCODE_REP_QGV_REJECTED REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 2)
+#define ADLS_PCODE_REP_PSF_MASK REG_GENMASK(3, 2)
+#define ADLS_PCODE_REP_PSF_SAFE REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 0)
+#define ADLS_PCODE_REP_PSF_POLL REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 1)
+#define ADLS_PCODE_REP_PSF_REJECTED REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 2)
+#define ICL_PCODE_REQ_QGV_PT_MASK REG_GENMASK(7, 0)
+#define ICL_PCODE_REQ_QGV_PT(x) REG_FIELD_PREP(ICL_PCODE_REQ_QGV_PT_MASK, (x))
+#define ADLS_PCODE_REQ_PSF_PT_MASK REG_GENMASK(10, 8)
+#define ADLS_PCODE_REQ_PSF_PT(x) REG_FIELD_PREP(ADLS_PCODE_REQ_PSF_PT_MASK, (x))
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define ICL_PCODE_EXIT_TCCOLD 0x12
@@ -7566,25 +7561,25 @@ enum skl_power_gate {
#define _PORT_CLK_SEL_A 0x46100
#define _PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define PORT_CLK_SEL_LCPLL_2700 (0 << 29)
-#define PORT_CLK_SEL_LCPLL_1350 (1 << 29)
-#define PORT_CLK_SEL_LCPLL_810 (2 << 29)
-#define PORT_CLK_SEL_SPLL (3 << 29)
-#define PORT_CLK_SEL_WRPLL(pll) (((pll) + 4) << 29)
-#define PORT_CLK_SEL_WRPLL1 (4 << 29)
-#define PORT_CLK_SEL_WRPLL2 (5 << 29)
-#define PORT_CLK_SEL_NONE (7 << 29)
-#define PORT_CLK_SEL_MASK (7 << 29)
+#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
+#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
+#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
+#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
+#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
+#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
+#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
+#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
+#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
-#define DDI_CLK_SEL_NONE (0x0 << 28)
-#define DDI_CLK_SEL_MG (0x8 << 28)
-#define DDI_CLK_SEL_TBT_162 (0xC << 28)
-#define DDI_CLK_SEL_TBT_270 (0xD << 28)
-#define DDI_CLK_SEL_TBT_540 (0xE << 28)
-#define DDI_CLK_SEL_TBT_810 (0xF << 28)
-#define DDI_CLK_SEL_MASK (0xF << 28)
+#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
+#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
+#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
+#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
+#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
+#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
+#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
/* Transcoder clock selection */
#define _TRANS_CLK_SEL_A 0x46140
@@ -8481,6 +8476,9 @@ enum skl_power_gate {
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
+#define XEHPSDV_TILE0_ADDR_RANGE _MMIO(0x4900)
+#define XEHPSDV_TILE_LMEM_RANGE_SHIFT 8
+
#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
#define XEHPSDV_CCS_BASE_SHIFT 8
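
Most of the i915_reg.h churn above converts open-coded (val << SHIFT) bitfields to REG_GENMASK()/REG_FIELD_PREP(), which derive the shift from the mask and make the separate *_SHIFT defines unnecessary. A stand-alone model of what those helpers do, without the kernel versions' compile-time range checks:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

/* illustrative field, mirroring the PORT_PLL_P1 conversion above */
#define PLL_P1_MASK	GENMASK32(15, 13)
#define PLL_P1(p1)	FIELD_PREP32(PLL_P1_MASK, (p1))

int main(void)
{
	/* old style: (0x07 << 13); new style derives the shift from the mask */
	printf("PLL_P1(3) = 0x%08x\n", PLL_P1(3));	/* 0x00006000 */
	return 0;
}
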
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index d78d78fce431..8f486f77609f 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -123,6 +123,4 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_DISPLAY_BASE 0x180000
-#define GEN12_SFC_DONE_MAX 4
-
#endif /* __I915_REG_DEFS__ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 582770360ad1..73d5195146b0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1598,7 +1598,8 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence *fence;
int ret = 0;
- dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
+ dma_resv_for_each_fence(&cursor, obj->base.resv,
+ dma_resv_usage_rw(write), fence) {
ret = i915_request_await_dma_fence(to, fence);
if (ret)
break;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 889f5b7dc78e..81def10eb58f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -25,7 +25,6 @@
*/
#include "display/intel_de.h"
-#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
@@ -119,9 +118,6 @@ void i915_restore_display(struct drm_i915_private *dev_priv)
if (GRAPHICS_VER(dev_priv) <= 4)
intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB);
- /* only restore FBC info on the platform that supports FBC*/
- intel_fbc_global_disable(dev_priv);
-
intel_vga_redisable(dev_priv);
intel_gmbus_reset(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 2a74a9a1cafe..ae984c66c48a 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -585,7 +585,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- dma_resv_iter_begin(&cursor, resv, write);
+ dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write));
dma_resv_for_each_fence_unlocked(&cursor, f) {
pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
gfp);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a4d1759375b9..8521daba212a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -39,113 +39,12 @@
#include "i915_sysfs.h"
#include "intel_pm.h"
-static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
+struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
struct drm_minor *minor = dev_get_drvdata(kdev);
return to_i915(minor->dev);
}
-#ifdef CONFIG_PM
-static u32 calc_residency(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- intel_wakeref_t wakeref;
- u64 res = 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- res = intel_rc6_residency_us(&to_gt(dev_priv)->rc6, reg);
-
- return DIV_ROUND_CLOSEST_ULL(res, 1000);
-}
-
-static ssize_t rc6_enable_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- unsigned int mask;
-
- mask = 0;
- if (HAS_RC6(dev_priv))
- mask |= BIT(0);
- if (HAS_RC6p(dev_priv))
- mask |= BIT(1);
- if (HAS_RC6pp(dev_priv))
- mask |= BIT(2);
-
- return sysfs_emit(buf, "%x\n", mask);
-}
-
-static ssize_t rc6_residency_ms_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
- return sysfs_emit(buf, "%u\n", rc6_residency);
-}
-
-static ssize_t rc6p_residency_ms_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
- return sysfs_emit(buf, "%u\n", rc6p_residency);
-}
-
-static ssize_t rc6pp_residency_ms_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
- return sysfs_emit(buf, "%u\n", rc6pp_residency);
-}
-
-static ssize_t media_rc6_residency_ms_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
- return sysfs_emit(buf, "%u\n", rc6_residency);
-}
-
-static DEVICE_ATTR_RO(rc6_enable);
-static DEVICE_ATTR_RO(rc6_residency_ms);
-static DEVICE_ATTR_RO(rc6p_residency_ms);
-static DEVICE_ATTR_RO(rc6pp_residency_ms);
-static DEVICE_ATTR_RO(media_rc6_residency_ms);
-
-static struct attribute *rc6_attrs[] = {
- &dev_attr_rc6_enable.attr,
- &dev_attr_rc6_residency_ms.attr,
- NULL
-};
-
-static const struct attribute_group rc6_attr_group = {
- .name = power_group_name,
- .attrs = rc6_attrs
-};
-
-static struct attribute *rc6p_attrs[] = {
- &dev_attr_rc6p_residency_ms.attr,
- &dev_attr_rc6pp_residency_ms.attr,
- NULL
-};
-
-static const struct attribute_group rc6p_attr_group = {
- .name = power_group_name,
- .attrs = rc6p_attrs
-};
-
-static struct attribute *media_rc6_attrs[] = {
- &dev_attr_media_rc6_residency_ms.attr,
- NULL
-};
-
-static const struct attribute_group media_rc6_attr_group = {
- .name = power_group_name,
- .attrs = media_rc6_attrs
-};
-#endif
-
static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
if (!HAS_L3_DPF(i915))
@@ -257,171 +156,6 @@ static const struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-static ssize_t gt_act_freq_mhz_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(i915)->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_read_actual_frequency(rps));
-}
-
-static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(i915)->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_get_requested_frequency(rps));
-}
-
-static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(i915)->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_get_boost_frequency(rps));
-}
-
-static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(dev_priv)->rps;
- ssize_t ret;
- u32 val;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
-
- ret = intel_rps_set_boost_frequency(rps, val);
-
- return ret ?: count;
-}
-
-static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(dev_priv)->rps;
-
- return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->efficient_freq));
-}
-
-static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = to_gt(dev_priv);
- struct intel_rps *rps = &gt->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_get_max_frequency(rps));
-}
-
-static ssize_t gt_max_freq_mhz_store(struct device *kdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = to_gt(dev_priv);
- struct intel_rps *rps = &gt->rps;
- ssize_t ret;
- u32 val;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
-
- ret = intel_rps_set_max_frequency(rps, val);
-
- return ret ?: count;
-}
-
-static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = to_gt(i915);
- struct intel_rps *rps = &gt->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_get_min_frequency(rps));
-}
-
-static ssize_t gt_min_freq_mhz_store(struct device *kdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(i915)->rps;
- ssize_t ret;
- u32 val;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
-
- ret = intel_rps_set_min_frequency(rps, val);
-
- return ret ?: count;
-}
-
-static DEVICE_ATTR_RO(gt_act_freq_mhz);
-static DEVICE_ATTR_RO(gt_cur_freq_mhz);
-static DEVICE_ATTR_RW(gt_boost_freq_mhz);
-static DEVICE_ATTR_RW(gt_max_freq_mhz);
-static DEVICE_ATTR_RW(gt_min_freq_mhz);
-
-static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
-
-static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
-static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
-static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
-static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
-
-/* For now we have a static number of RP states */
-static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(dev_priv)->rps;
- u32 val;
-
- if (attr == &dev_attr_gt_RP0_freq_mhz)
- val = intel_rps_get_rp0_frequency(rps);
- else if (attr == &dev_attr_gt_RP1_freq_mhz)
- val = intel_rps_get_rp1_frequency(rps);
- else if (attr == &dev_attr_gt_RPn_freq_mhz)
- val = intel_rps_get_rpn_frequency(rps);
- else
- BUG();
-
- return sysfs_emit(buf, "%d\n", val);
-}
-
-static const struct attribute * const gen6_attrs[] = {
- &dev_attr_gt_act_freq_mhz.attr,
- &dev_attr_gt_cur_freq_mhz.attr,
- &dev_attr_gt_boost_freq_mhz.attr,
- &dev_attr_gt_max_freq_mhz.attr,
- &dev_attr_gt_min_freq_mhz.attr,
- &dev_attr_gt_RP0_freq_mhz.attr,
- &dev_attr_gt_RP1_freq_mhz.attr,
- &dev_attr_gt_RPn_freq_mhz.attr,
- NULL,
-};
-
-static const struct attribute * const vlv_attrs[] = {
- &dev_attr_gt_act_freq_mhz.attr,
- &dev_attr_gt_cur_freq_mhz.attr,
- &dev_attr_gt_boost_freq_mhz.attr,
- &dev_attr_gt_max_freq_mhz.attr,
- &dev_attr_gt_min_freq_mhz.attr,
- &dev_attr_gt_RP0_freq_mhz.attr,
- &dev_attr_gt_RP1_freq_mhz.attr,
- &dev_attr_gt_RPn_freq_mhz.attr,
- &dev_attr_vlv_rpe_freq_mhz.attr,
- NULL,
-};
-
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
@@ -492,29 +226,6 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
struct device *kdev = dev_priv->drm.primary->kdev;
int ret;
-#ifdef CONFIG_PM
- if (HAS_RC6(dev_priv)) {
- ret = sysfs_merge_group(&kdev->kobj,
- &rc6_attr_group);
- if (ret)
- drm_err(&dev_priv->drm,
- "RC6 residency sysfs setup failed\n");
- }
- if (HAS_RC6p(dev_priv)) {
- ret = sysfs_merge_group(&kdev->kobj,
- &rc6p_attr_group);
- if (ret)
- drm_err(&dev_priv->drm,
- "RC6p residency sysfs setup failed\n");
- }
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- ret = sysfs_merge_group(&kdev->kobj,
- &media_rc6_attr_group);
- if (ret)
- drm_err(&dev_priv->drm,
- "Media RC6 residency sysfs setup failed\n");
- }
-#endif
if (HAS_L3_DPF(dev_priv)) {
ret = device_create_bin_file(kdev, &dpf_attrs);
if (ret)
@@ -530,13 +241,10 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
}
}
- ret = 0;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
- else if (GRAPHICS_VER(dev_priv) >= 6)
- ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
- if (ret)
- drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
+ dev_priv->sysfs_gt = kobject_create_and_add("gt", &kdev->kobj);
+ if (!dev_priv->sysfs_gt)
+ drm_warn(&dev_priv->drm,
+ "failed to register GT sysfs directory\n");
i915_setup_error_capture(kdev);
@@ -549,14 +257,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
i915_teardown_error_capture(kdev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- sysfs_remove_files(&kdev->kobj, vlv_attrs);
- else
- sysfs_remove_files(&kdev->kobj, gen6_attrs);
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);
-#ifdef CONFIG_PM
- sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
- sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
-#endif
}
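The RC6 residency and RPS frequency attributes deleted above are not simply dropped: the flat per-device files give way to the new "gt/" kobject, under which per-GT directories are expected to be populated elsewhere in this series. A hypothetical sketch of hanging an attribute group off the new kobject with stock sysfs helpers (freq_group, freq_attrs and example_setup are invented names):

    static const struct attribute_group freq_group = {
    	.attrs = freq_attrs,	/* hypothetical, defined elsewhere */
    };

    static void example_setup(struct drm_i915_private *i915)
    {
    	/* i915->sysfs_gt is the "gt" kobject created above. */
    	if (i915->sysfs_gt &&
    	    sysfs_create_group(i915->sysfs_gt, &freq_group))
    		drm_warn(&i915->drm, "failed to add freq group\n");
    }
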
diff --git a/drivers/gpu/drm/i915/i915_sysfs.h b/drivers/gpu/drm/i915/i915_sysfs.h
index 41afd4366416..243a17741e3f 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.h
+++ b/drivers/gpu/drm/i915/i915_sysfs.h
@@ -6,8 +6,11 @@
#ifndef __I915_SYSFS_H__
#define __I915_SYSFS_H__
+struct device;
struct drm_i915_private;
+struct drm_i915_private *kdev_minor_to_i915(struct device *kdev);
+
void i915_setup_sysfs(struct drm_i915_private *i915);
void i915_teardown_sysfs(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 129f668f21ff..a5109548abc0 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -70,8 +70,10 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
min_page_size = bo->page_alignment << PAGE_SHIFT;
GEM_BUG_ON(min_page_size < mm->chunk_size);
+ GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+ place->flags & TTM_PL_FLAG_CONTIGUOUS) {
unsigned long pages;
size = roundup_pow_of_two(size);
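The extra guard reads as: only take the power-of-two roundup path for TTM_PL_FLAG_CONTIGUOUS when the placement window is larger than the request; an exact-range request is forced into a single position anyway. A worked example of the skipped case:

    /*
     * place->fpfn = 0, place->lpfn = 256, num_pages = 256:
     * fpfn + num_pages == lpfn, so the window admits exactly one
     * placement and rounding the size up to a power of two would
     * only overshoot the allowed range.
     */
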
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index f9e780dee9de..29fd02bf5ea8 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/device.h>
+
#include <drm/drm_drv.h>
#include "i915_drv.h"
@@ -114,3 +116,12 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
/* Keep t->expires = 0 reserved to indicate a canceled timer. */
mod_timer(t, jiffies + timeout ?: 1);
}
+
+bool i915_vtd_active(struct drm_i915_private *i915)
+{
+ if (device_iommu_mapped(i915->drm.dev))
+ return true;
+
+ /* Running as a guest, we assume the host is enforcing VT-d */
+ return i915_run_as_guest();
+}
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index bfafd0afd117..ea7648e3aa0e 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -28,10 +28,15 @@
#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
+#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
+#ifdef CONFIG_X86
+#include <asm/hypervisor.h>
+#endif
+
struct drm_i915_private;
struct timer_list;
@@ -399,26 +404,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
#define MBps(x) KBps(1000 * (x))
#define GBps(x) ((u64)1000 * MBps((x)))
-static inline const char *yesno(bool v)
-{
- return v ? "yes" : "no";
-}
-
-static inline const char *onoff(bool v)
-{
- return v ? "on" : "off";
-}
-
-static inline const char *enabledisable(bool v)
-{
- return v ? "enable" : "disable";
-}
-
-static inline const char *enableddisabled(bool v)
-{
- return v ? "enabled" : "disabled";
-}
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
@@ -444,4 +429,16 @@ static inline bool timer_expired(const struct timer_list *t)
return timer_active(t) && !timer_pending(t);
}
+static inline bool i915_run_as_guest(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
+#else
+ /* Not supported yet */
+ return false;
+#endif
+}
+
+bool i915_vtd_active(struct drm_i915_private *i915);
+
#endif /* !__I915_UTILS_H */
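The four removed string helpers have direct equivalents in <linux/string_helpers.h>, now included at the top of this header; the later hunks converting yesno() callers to str_yes_no() follow from that. The assumed one-to-one mapping:

    #include <linux/string_helpers.h>

    /* Assumed drop-in replacements for the removed local helpers:
     *   yesno(v)            -> str_yes_no(v)
     *   onoff(v)            -> str_on_off(v)
     *   enabledisable(v)    -> str_enable_disable(v)
     *   enableddisabled(v)  -> str_enabled_disabled(v)
     */
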
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index eeaa8d0d0407..4f6db539571a 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -47,7 +47,7 @@ static inline void assert_vma_held_evict(const struct i915_vma *vma)
* This is the only exception to the requirement of the object lock
* being held.
*/
- if (atomic_read(&vma->vm->open))
+ if (kref_read(&vma->vm->ref))
assert_object_held_shared(vma->obj);
}
@@ -113,6 +113,7 @@ vma_create(struct drm_i915_gem_object *obj,
struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
struct rb_node *rb, **p;
+ int err;
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
@@ -121,8 +122,6 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- kref_init(&vma->ref);
- vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->size = obj->base.size;
@@ -138,6 +137,8 @@ vma_create(struct drm_i915_gem_object *obj,
}
INIT_LIST_HEAD(&vma->closed_link);
+ INIT_LIST_HEAD(&vma->obj_link);
+ RB_CLEAR_NODE(&vma->obj_node);
if (view && view->type != I915_GGTT_VIEW_NORMAL) {
vma->ggtt_view = *view;
@@ -163,8 +164,16 @@ vma_create(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
- spin_lock(&obj->vma.lock);
+ err = mutex_lock_interruptible(&vm->mutex);
+ if (err) {
+ pos = ERR_PTR(err);
+ goto err_vma;
+ }
+
+ vma->vm = vm;
+ list_add_tail(&vma->vm_link, &vm->unbound_list);
+ spin_lock(&obj->vma.lock);
if (i915_is_ggtt(vm)) {
if (unlikely(overflows_type(vma->size, u32)))
goto err_unlock;
@@ -222,13 +231,15 @@ vma_create(struct drm_i915_gem_object *obj,
list_add_tail(&vma->obj_link, &obj->vma.list);
spin_unlock(&obj->vma.lock);
+ mutex_unlock(&vm->mutex);
return vma;
err_unlock:
spin_unlock(&obj->vma.lock);
+ list_del_init(&vma->vm_link);
+ mutex_unlock(&vm->mutex);
err_vma:
- i915_vm_put(vm);
i915_vma_free(vma);
return pos;
}
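With the kref on the vma gone, the vma's position on the vm lists carries its state instead. Piecing together the hunks in this file, the vma->vm_link transitions, apparently all made with vm->mutex held, are:

    /*
     *   vma_create()       list_add_tail(..., &vm->unbound_list)
     *   i915_vma_insert()  list_move_tail(..., &vm->bound_list)
     *   i915_vma_detach()  list_move_tail(..., &vm->unbound_list)
     *   destroy paths      list_del_init(&vma->vm_link)
     */
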
@@ -279,7 +290,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
- GEM_BUG_ON(!atomic_read(&vm->open));
+ GEM_BUG_ON(!kref_read(&vm->ref));
spin_lock(&obj->vma.lock);
vma = i915_vma_lookup(obj, vm, view);
@@ -322,7 +333,6 @@ static void __vma_release(struct dma_fence_work *work)
i915_gem_object_put(vw->pinned);
i915_vm_free_pt_stash(vw->vm, &vw->stash);
- i915_vm_put(vw->vm);
if (vw->vma_res)
i915_vma_resource_put(vw->vma_res);
}
@@ -515,21 +525,18 @@ int i915_vma_bind(struct i915_vma *vma,
if (!work->vma_res->bi.pages_rsgt)
work->pinned = i915_gem_object_get(vma->obj);
} else {
- if (vma->obj) {
- ret = i915_gem_object_wait_moving_fence(vma->obj, true);
- if (ret) {
- i915_vma_resource_free(vma->resource);
- vma->resource = NULL;
+ ret = i915_gem_object_wait_moving_fence(vma->obj, true);
+ if (ret) {
+ i915_vma_resource_free(vma->resource);
+ vma->resource = NULL;
- return ret;
- }
+ return ret;
}
vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
bind_flags);
}
- if (vma->obj)
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
atomic_or(bind_flags, &vma->flags);
return 0;
@@ -541,7 +548,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
int err;
if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
- return IO_ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
if (!i915_gem_object_is_lmem(vma->obj)) {
if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
@@ -594,7 +601,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
err_unpin:
__i915_vma_unpin(vma);
err:
- return IO_ERR_PTR(err);
+ return IOMEM_ERR_PTR(err);
}
void i915_vma_flush_writes(struct i915_vma *vma)
@@ -841,7 +848,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
- list_add_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
}
@@ -857,7 +864,7 @@ i915_vma_detach(struct i915_vma *vma)
* vma, we can drop its hold on the backing storage and allow
* it to be reaped by the shrinker.
*/
- list_del(&vma->vm_link);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
@@ -1360,8 +1367,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
- if (flags & vma->vm->bind_async_flags || moving) {
+ if (flags & vma->vm->bind_async_flags) {
/* lock VM */
err = i915_vm_lock_objects(vma->vm, ww);
if (err)
@@ -1373,7 +1379,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
goto err_rpm;
}
- work->vm = i915_vm_get(vma->vm);
+ work->vm = vma->vm;
+
+ err = i915_gem_object_get_moving_fence(vma->obj, &moving);
+ if (err)
+ goto err_rpm;
dma_fence_work_chain(&work->base, moving);
@@ -1555,9 +1565,7 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (ww)
return __i915_ggtt_pin(vma, ww, align, flags);
-#ifdef CONFIG_LOCKDEP
- WARN_ON(dma_resv_held(vma->obj->base.resv));
-#endif
+ lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
for_i915_gem_ww(&_ww, err, true) {
err = i915_gem_object_lock(vma->obj, &_ww);
@@ -1618,16 +1626,6 @@ void i915_vma_reopen(struct i915_vma *vma)
spin_unlock_irq(&gt->closed_lock);
}
-void i915_vma_release(struct kref *ref)
-{
- struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
-
- i915_vm_put(vma->vm);
- i915_active_fini(&vma->active);
- GEM_WARN_ON(vma->resource);
- i915_vma_free(vma);
-}
-
static void force_unbind(struct i915_vma *vma)
{
if (!drm_mm_node_allocated(&vma->node))
@@ -1638,7 +1636,7 @@ static void force_unbind(struct i915_vma *vma)
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}
-static void release_references(struct i915_vma *vma)
+static void release_references(struct i915_vma *vma, bool vm_ddestroy)
{
struct drm_i915_gem_object *obj = vma->obj;
struct intel_gt *gt = vma->vm->gt;
@@ -1649,13 +1647,19 @@ static void release_references(struct i915_vma *vma)
list_del(&vma->obj_link);
if (!RB_EMPTY_NODE(&vma->obj_node))
rb_erase(&vma->obj_node, &obj->vma.tree);
+
spin_unlock(&obj->vma.lock);
spin_lock_irq(&gt->closed_lock);
__i915_vma_remove_closed(vma);
spin_unlock_irq(&gt->closed_lock);
- __i915_vma_put(vma);
+ if (vm_ddestroy)
+ i915_vm_resv_put(vma->vm);
+
+ i915_active_fini(&vma->active);
+ GEM_WARN_ON(vma->resource);
+ i915_vma_free(vma);
}
/**
@@ -1670,8 +1674,12 @@ static void release_references(struct i915_vma *vma)
* - __i915_gem_object_pages_fini()
* - __i915_vm_close() - Blocks the above function by taking a reference on
* the object.
- * - __i915_vma_parked() - Blocks the above functions by taking an open-count on
- * the vm and a reference on the object.
+ * - __i915_vma_parked() - Blocks the above functions by taking a reference
+ * on the vm and a reference on the object. Also takes the object lock so
+ * destruction from __i915_vma_parked() can be blocked by holding the
+ * object lock. Since the object lock is only allowed from within i915 with
+ * an object refcount, holding the object lock also implicitly blocks the
+ * vma freeing from __i915_gem_object_pages_fini().
*
* Because of locks taken during destruction, a vma is also guaranteed to
* stay alive while the following locks are held if it was looked up while
@@ -1679,24 +1687,27 @@ static void release_references(struct i915_vma *vma)
* - vm->mutex
* - obj->vma.lock
* - gt->closed_lock
- *
- * A vma user can also temporarily keep the vma alive while holding a vma
- * reference.
*/
void i915_vma_destroy_locked(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->mutex);
force_unbind(vma);
- release_references(vma);
+ list_del_init(&vma->vm_link);
+ release_references(vma, false);
}
void i915_vma_destroy(struct i915_vma *vma)
{
+ bool vm_ddestroy;
+
mutex_lock(&vma->vm->mutex);
force_unbind(vma);
+ list_del_init(&vma->vm_link);
+ vm_ddestroy = vma->vm_ddestroy;
+ vma->vm_ddestroy = false;
mutex_unlock(&vma->vm->mutex);
- release_references(vma);
+ release_references(vma, vm_ddestroy);
}
void i915_vma_parked(struct intel_gt *gt)
@@ -1714,7 +1725,7 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (!i915_vm_tryopen(vm)) {
+ if (!i915_vm_tryget(vm)) {
i915_gem_object_put(obj);
continue;
}
@@ -1740,7 +1751,7 @@ void i915_vma_parked(struct intel_gt *gt)
}
i915_gem_object_put(obj);
- i915_vm_close(vm);
+ i915_vm_put(vm);
}
}
@@ -1822,20 +1833,28 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
intel_frontbuffer_put(front);
}
+ if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+ err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+ if (unlikely(err))
+ return err;
+ }
+
if (fence) {
- dma_resv_add_excl_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_WRITE);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
}
} else {
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
+ err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
if (unlikely(err))
return err;
}
if (fence) {
- dma_resv_add_shared_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_READ);
obj->write_domain = 0;
}
}
@@ -1891,7 +1910,9 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
/* If vm is not open, unbind is a nop. */
vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
- atomic_read(&vma->vm->open);
+ kref_read(&vma->vm->ref);
+ vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
+ vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
unbind_fence = i915_vma_resource_unbind(vma_res);
@@ -2047,7 +2068,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
if (!obj->mm.rsgt)
return -EBUSY;
- err = dma_resv_reserve_shared(obj->base.resv, 1);
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
if (err)
return -EBUSY;
@@ -2075,7 +2096,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
goto out_rpm;
}
- dma_resv_add_shared_fence(obj->base.resv, fence);
+ dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
dma_fence_put(fence);
out_rpm:
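Two generic replacements recur throughout this file. IOMEM_ERR_PTR() supersedes the driver-local IO_ERR_PTR() (removed from i915_vma.h below) and, to the best of our knowledge, is the <linux/io.h> helper defined as:

    #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)

More substantially, every fence publication now follows the reworked dma-resv pattern: reserve a slot first, then add the fence with an explicit usage class. A condensed sketch assuming the v5.19 interface:

    /* Caller must hold the reservation lock on @resv. */
    static int example_publish(struct dma_resv *resv,
    			   struct dma_fence *fence, bool write)
    {
    	int err;

    	err = dma_resv_reserve_fences(resv, 1);
    	if (err)
    		return err;

    	dma_resv_add_fence(resv, fence,
    			   write ? DMA_RESV_USAGE_WRITE
    				 : DMA_RESV_USAGE_READ);
    	return 0;
    }
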
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 67ae7341c7e0..88ca0bd9c900 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -222,20 +222,6 @@ void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
-static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
-{
- if (kref_get_unless_zero(&vma->ref))
- return vma;
-
- return NULL;
-}
-
-void i915_vma_release(struct kref *ref);
-static inline void __i915_vma_put(struct i915_vma *vma)
-{
- kref_put(&vma->ref, i915_vma_release);
-}
-
void i915_vma_destroy_locked(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
@@ -331,7 +317,6 @@ static inline bool i915_node_color_differs(const struct drm_mm_node *node,
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
-#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
/**
* i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 57ae92ba8af1..27c55027387a 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -178,7 +178,7 @@ static void i915_vma_resource_unbind_work(struct work_struct *work)
bool lockdep_cookie;
lockdep_cookie = dma_fence_begin_signalling();
- if (likely(atomic_read(&vm->open)))
+ if (likely(!vma_res->skip_pte_rewrite))
vma_res->ops->unbind_vma(vm, vma_res);
dma_fence_end_signalling(lockdep_cookie);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 25913913baa6..5d8427caa2ba 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -62,6 +62,11 @@ struct i915_page_sizes {
* deferred to a work item awaiting unsignaled fences. This is a hack.
* (dma_fence_work uses a fence flag for this, but this seems slightly
* cleaner).
+ * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
+ * take a wakeref in the dma-fence signalling critical path, it needs to be
+ * taken when the unbind is scheduled.
+ * @skip_pte_rewrite: During ggtt suspend and vm takedown, pte rewriting
+ * must be skipped for unbind.
*
* The lifetime of a struct i915_vma_resource is from a binding request to
* the actual possible asynchronous unbind has completed.
@@ -113,6 +118,7 @@ struct i915_vma_resource {
bool allocated:1;
bool immediate_unbind:1;
bool needs_wakeref:1;
+ bool skip_pte_rewrite:1;
};
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 88370dadca82..be6e028c3b57 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -211,7 +211,6 @@ struct i915_vma {
* handles (but same file) for execbuf, i.e. the number of aliases
* that exist in the ctx->handle_vmas LUT for this vma.
*/
- struct kref ref;
atomic_t open_count;
atomic_t flags;
/**
@@ -272,6 +271,13 @@ struct i915_vma {
atomic_t pages_count; /* number of active binds to the pages */
/**
+ * Whether we hold a reference on the vm dma_resv lock to temporarily
+ * block vm freeing until the vma is destroyed.
+ * Protected by the vm mutex.
+ */
+ bool vm_ddestroy;
+
+ /**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
* i915_ggtt_view_type is used to distinguish between those entries.
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 32c5f10e31db..7eb893666595 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -22,6 +22,8 @@
*
*/
+#include <linux/string_helpers.h>
+
#include <drm/drm_print.h>
#include <drm/i915_pciids.h>
@@ -29,6 +31,7 @@
#include "display/intel_de.h"
#include "intel_device_info.h"
#include "i915_drv.h"
+#include "i915_utils.h"
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
@@ -69,6 +72,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(ALDERLAKE_P),
PLATFORM_NAME(XEHPSDV),
PLATFORM_NAME(DG2),
+ PLATFORM_NAME(PONTEVECCHIO),
};
#undef PLATFORM_NAME
@@ -110,11 +114,11 @@ void intel_device_info_print_static(const struct intel_device_info *info,
drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name))
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name))
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display.name))
DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
@@ -178,8 +182,21 @@ static const u16 subplatform_n_ids[] = {
INTEL_ADLN_IDS(0),
};
-static const u16 subplatform_rpls_ids[] = {
+static const u16 subplatform_rpl_ids[] = {
INTEL_RPLS_IDS(0),
+ INTEL_RPLP_IDS(0),
+};
+
+static const u16 subplatform_g10_ids[] = {
+ INTEL_DG2_G10_IDS(0),
+};
+
+static const u16 subplatform_g11_ids[] = {
+ INTEL_DG2_G11_IDS(0),
+};
+
+static const u16 subplatform_g12_ids[] = {
+ INTEL_DG2_G12_IDS(0),
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
@@ -224,9 +241,18 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_n_ids,
ARRAY_SIZE(subplatform_n_ids))) {
mask = BIT(INTEL_SUBPLATFORM_N);
- } else if (find_devid(devid, subplatform_rpls_ids,
- ARRAY_SIZE(subplatform_rpls_ids))) {
- mask = BIT(INTEL_SUBPLATFORM_RPL_S);
+ } else if (find_devid(devid, subplatform_rpl_ids,
+ ARRAY_SIZE(subplatform_rpl_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_RPL);
+ } else if (find_devid(devid, subplatform_g10_ids,
+ ARRAY_SIZE(subplatform_g10_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_G10);
+ } else if (find_devid(devid, subplatform_g11_ids,
+ ARRAY_SIZE(subplatform_g11_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_G11);
+ } else if (find_devid(devid, subplatform_g12_ids,
+ ARRAY_SIZE(subplatform_g12_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_G12);
}
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
@@ -366,7 +392,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->display.has_dsc = 0;
}
- if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active(dev_priv)) {
+ if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
info->ppgtt_type = INTEL_PPGTT_NONE;
@@ -388,6 +414,6 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p)
{
drm_printf(p, "Has logical contexts? %s\n",
- yesno(caps->has_logical_contexts));
+ str_yes_no(caps->has_logical_contexts));
drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 291215d9da28..e7d2cf7d65c8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -88,6 +88,7 @@ enum intel_platform {
INTEL_ALDERLAKE_P,
INTEL_XEHPSDV,
INTEL_DG2,
+ INTEL_PONTEVECCHIO,
INTEL_MAX_PLATFORMS
};
@@ -114,11 +115,16 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_G11 1
#define INTEL_SUBPLATFORM_G12 2
-/* ADL-S */
-#define INTEL_SUBPLATFORM_RPL_S 0
+/* ADL */
+#define INTEL_SUBPLATFORM_RPL 0
/* ADL-P */
-#define INTEL_SUBPLATFORM_N 0
+/*
+ * Since #define INTEL_SUBPLATFORM_RPL 0 applies here
+ * too, SUBPLATFORM_N must use a different bit.
+ */
+#define INTEL_SUBPLATFORM_N 1
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
@@ -137,9 +143,12 @@ enum intel_ppgtt_type {
func(needs_compact_pt); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
+ func(has_4tile); \
func(has_flat_ccs); \
func(has_global_mocs); \
func(has_gt_uc); \
+ func(has_heci_pxp); \
+ func(has_heci_gscfi); \
func(has_guc_deprivilege); \
func(has_l3_dpf); \
func(has_llc); \
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 174c95c3e10f..2b9e7833da96 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -3,6 +3,8 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/string_helpers.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_dram.h"
@@ -136,7 +138,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm,
"CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
- yesno(skl_is_16gb_dimm(dimm)));
+ str_yes_no(skl_is_16gb_dimm(dimm)));
}
static int
@@ -165,7 +167,7 @@ skl_dram_get_channel_info(struct drm_i915_private *i915,
skl_is_16gb_dimm(&ch->dimm_s);
drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
- channel, ch->ranks, yesno(ch->is_16gb_dimm));
+ channel, ch->ranks, str_yes_no(ch->is_16gb_dimm));
return 0;
}
@@ -214,7 +216,7 @@ skl_dram_get_channels_info(struct drm_i915_private *i915)
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
- yesno(dram_info->symmetric_memory));
+ str_yes_no(dram_info->symmetric_memory));
return 0;
}
@@ -492,7 +494,7 @@ void intel_dram_detect(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
- yesno(dram_info->wm_lv_0_adjust_needed));
+ str_yes_no(dram_info->wm_lv_0_adjust_needed));
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index cf6e98962d82..e98b6d69a91a 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -24,7 +24,10 @@
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
-#include "gvt/gvt.h"
+#include "gem/i915_gem_dmabuf.h"
+#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
+#include "gt/shmem_utils.h"
/**
* DOC: Intel GVT-g host support
@@ -41,6 +44,10 @@
* doc is available on https://01.org/group/2230/documentation-list.
*/
+static LIST_HEAD(intel_gvt_devices);
+static const struct intel_vgpu_ops *intel_gvt_ops;
+static DEFINE_MUTEX(intel_gvt_mutex);
+
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
@@ -59,32 +66,162 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return false;
}
-/**
- * intel_gvt_sanitize_options - sanitize GVT related options
- * @dev_priv: drm i915 private data
- *
- * This function is called at the i915 options sanitize stage.
- */
-void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
+static void free_initial_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
+
+ vfree(vgpu->initial_mmio);
+ vgpu->initial_mmio = NULL;
+
+ kfree(vgpu->initial_cfg_space);
+ vgpu->initial_cfg_space = NULL;
+}
+
+static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ u32 size)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+ u32 *mmio, i;
+
+ for (i = offset; i < offset + size; i += 4) {
+ mmio = iter->data + i;
+ *mmio = intel_uncore_read_notrace(to_gt(dev_priv)->uncore,
+ _MMIO(i));
+ }
+}
+
+static int handle_mmio(struct intel_gvt_mmio_table_iter *iter,
+ u32 offset, u32 size)
+{
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+
+ save_mmio(iter, offset, size);
+ return 0;
+}
+
+static int save_initial_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
+ struct intel_gvt_mmio_table_iter iter;
+ void *mem;
+ int i, ret;
+
+ mem = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ vgpu->initial_cfg_space = mem;
+
+ for (i = 0; i < PCI_CFG_SPACE_EXP_SIZE; i += 4)
+ pci_read_config_dword(pdev, i, mem + i);
+
+ mem = vzalloc(2 * SZ_1M);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err_mmio;
+ }
+
+ vgpu->initial_mmio = mem;
+
+ iter.i915 = dev_priv;
+ iter.data = vgpu->initial_mmio;
+ iter.handle_mmio_cb = handle_mmio;
+
+ ret = intel_gvt_iterate_mmio_table(&iter);
+ if (ret)
+ goto err_iterate;
+
+ return 0;
+
+err_iterate:
+ vfree(vgpu->initial_mmio);
+ vgpu->initial_mmio = NULL;
+err_mmio:
+ kfree(vgpu->initial_cfg_space);
+ vgpu->initial_cfg_space = NULL;
+
+ return ret;
+}
+
+static void intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->params.enable_gvt)
+ if (!dev_priv->params.enable_gvt) {
+ drm_dbg(&dev_priv->drm,
+ "GVT-g is disabled by kernel params\n");
return;
+ }
if (intel_vgpu_active(dev_priv)) {
drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
- goto bail;
+ return;
}
if (!is_supported_device(dev_priv)) {
drm_info(&dev_priv->drm,
"Unsupported device. GVT-g is disabled\n");
- goto bail;
+ return;
+ }
+
+ if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
+ drm_err(&dev_priv->drm,
+ "Graphics virtualization is not yet supported with GuC submission\n");
+ return;
}
- return;
-bail:
- dev_priv->params.enable_gvt = 0;
+ if (save_initial_hw_state(dev_priv)) {
+ drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n");
+ return;
+ }
+
+ if (intel_gvt_ops->init_device(dev_priv))
+ drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
+}
+
+static void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->gvt)
+ intel_gvt_ops->clean_device(dev_priv);
+ free_initial_hw_state(dev_priv);
+}
+
+int intel_gvt_set_ops(const struct intel_vgpu_ops *ops)
+{
+ struct drm_i915_private *dev_priv;
+
+ mutex_lock(&intel_gvt_mutex);
+ if (intel_gvt_ops) {
+ mutex_unlock(&intel_gvt_mutex);
+ return -EINVAL;
+ }
+ intel_gvt_ops = ops;
+
+ list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
+ intel_gvt_init_device(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT);
+
+void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
+{
+ struct drm_i915_private *dev_priv;
+
+ mutex_lock(&intel_gvt_mutex);
+ if (intel_gvt_ops != ops) {
+ mutex_unlock(&intel_gvt_mutex);
+ return;
+ }
+
+ list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
+ intel_gvt_clean_device(dev_priv);
+
+ intel_gvt_ops = NULL;
+ mutex_unlock(&intel_gvt_mutex);
}
+EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT);
/**
* intel_gvt_init - initialize GVT components
@@ -98,41 +235,18 @@ bail:
*/
int intel_gvt_init(struct drm_i915_private *dev_priv)
{
- int ret;
-
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- if (!dev_priv->params.enable_gvt) {
- drm_dbg(&dev_priv->drm,
- "GVT-g is disabled by kernel params\n");
- return 0;
- }
-
- if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
- drm_err(&dev_priv->drm,
- "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
- return -EIO;
- }
-
- ret = intel_gvt_init_device(dev_priv);
- if (ret) {
- drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
- goto bail;
- }
-
- return 0;
+ mutex_lock(&intel_gvt_mutex);
+ list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices);
+ if (intel_gvt_ops)
+ intel_gvt_init_device(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
-bail:
- dev_priv->params.enable_gvt = 0;
return 0;
}
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gvt;
-}
-
/**
* intel_gvt_driver_remove - cleanup GVT components when i915 driver is
* unbinding
@@ -143,10 +257,10 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
*/
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
- if (!intel_gvt_active(dev_priv))
- return;
-
+ mutex_lock(&intel_gvt_mutex);
intel_gvt_clean_device(dev_priv);
+ list_del(&dev_priv->vgpu.entry);
+ mutex_unlock(&intel_gvt_mutex);
}
/**
@@ -159,6 +273,50 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
*/
void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
- if (intel_gvt_active(dev_priv))
- intel_gvt_pm_resume(dev_priv->gvt);
+ mutex_lock(&intel_gvt_mutex);
+ if (dev_priv->gvt)
+ intel_gvt_ops->pm_resume(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
}
+
+/*
+ * Exported here so that the exports only get created when GVT support is
+ * actually enabled.
+ */
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT);
+#endif
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT);
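All of these symbols live in the I915_GVT namespace, so a consuming module (presumably the standalone hypervisor backend this restructuring prepares for) has to import it explicitly, or modpost refuses the references:

    /* In the consuming module's source: */
    MODULE_IMPORT_NS(I915_GVT);
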
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index d7d3fb6186fd..eb2a2be252ca 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,16 +24,34 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
+#include <linux/types.h>
+
struct drm_i915_private;
#ifdef CONFIG_DRM_I915_GVT
+
+struct intel_gvt_mmio_table_iter {
+ struct drm_i915_private *i915;
+ void *data;
+ int (*handle_mmio_cb)(struct intel_gvt_mmio_table_iter *iter,
+ u32 offset, u32 size);
+};
+
int intel_gvt_init(struct drm_i915_private *dev_priv);
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv);
-int intel_gvt_init_device(struct drm_i915_private *dev_priv);
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
-void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
void intel_gvt_resume(struct drm_i915_private *dev_priv);
+int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter);
+
+struct intel_vgpu_ops {
+ int (*init_device)(struct drm_i915_private *dev_priv);
+ void (*clean_device)(struct drm_i915_private *dev_priv);
+ void (*pm_resume)(struct drm_i915_private *i915);
+};
+
+int intel_gvt_set_ops(const struct intel_vgpu_ops *ops);
+void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops);
+
#else
static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
@@ -44,12 +62,16 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
}
-static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
+static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
}
-static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
+struct intel_gvt_mmio_table_iter {
+};
+
+static inline int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
{
+ return 0;
}
#endif
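Taken together, the header now spells out the whole host/backend contract: a backend registers an intel_vgpu_ops table and can walk the MMIO table through the iterator callback. A minimal sketch of a backend module built only on the interface above; every my_* name is hypothetical:

    static int my_init_device(struct drm_i915_private *i915)
    {
    	return 0;	/* set up per-device vGPU state here */
    }

    static void my_clean_device(struct drm_i915_private *i915)
    {
    }

    static void my_pm_resume(struct drm_i915_private *i915)
    {
    }

    static const struct intel_vgpu_ops my_ops = {
    	.init_device	= my_init_device,
    	.clean_device	= my_clean_device,
    	.pm_resume	= my_pm_resume,
    };

    static int __init my_backend_init(void)
    {
    	/* Also probes every i915 device registered so far. */
    	return intel_gvt_set_ops(&my_ops);
    }
    module_init(my_backend_init);

    static void __exit my_backend_exit(void)
    {
    	intel_gvt_clear_ops(&my_ops);
    }
    module_exit(my_backend_exit);
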
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
new file mode 100644
index 000000000000..72dac1718f3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -0,0 +1,1292 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "display/intel_dmc_regs.h"
+#include "display/vlv_dsi_pll_regs.h"
+#include "gt/intel_gt_regs.h"
+#include "gvt/gvt.h"
+#include "i915_drv.h"
+#include "i915_pvinfo.h"
+#include "i915_reg.h"
+#include "intel_gvt.h"
+#include "intel_mchbar_regs.h"
+
+#define MMIO_F(reg, s) do { \
+ int ret; \
+ ret = iter->handle_mmio_cb(iter, i915_mmio_reg_offset(reg), s); \
+ if (ret) \
+ return ret; \
+} while (0)
+
+#define MMIO_D(reg) MMIO_F(reg, 4)
+
+#define MMIO_RING_F(prefix, s) do { \
+ MMIO_F(prefix(RENDER_RING_BASE), s); \
+ MMIO_F(prefix(BLT_RING_BASE), s); \
+ MMIO_F(prefix(GEN6_BSD_RING_BASE), s); \
+ MMIO_F(prefix(VEBOX_RING_BASE), s); \
+ if (HAS_ENGINE(to_gt(iter->i915), VCS1)) \
+ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s); \
+} while (0)
+
+#define MMIO_RING_D(prefix) \
+ MMIO_RING_F(prefix, 4)
+
+static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+
+ MMIO_RING_D(RING_IMR);
+ MMIO_D(SDEIMR);
+ MMIO_D(SDEIER);
+ MMIO_D(SDEIIR);
+ MMIO_D(SDEISR);
+ MMIO_RING_D(RING_HWSTAM);
+ MMIO_D(BSD_HWS_PGA_GEN7);
+ MMIO_D(BLT_HWS_PGA_GEN7);
+ MMIO_D(VEBOX_HWS_PGA_GEN7);
+
+#define RING_REG(base) _MMIO((base) + 0x28)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x134)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x6c)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+ MMIO_D(_MMIO(0x2148));
+ MMIO_D(CCID(RENDER_RING_BASE));
+ MMIO_D(_MMIO(0x12198));
+ MMIO_D(GEN7_CXT_SIZE);
+ MMIO_RING_D(RING_TAIL);
+ MMIO_RING_D(RING_HEAD);
+ MMIO_RING_D(RING_CTL);
+ MMIO_RING_D(RING_ACTHD);
+ MMIO_RING_D(RING_START);
+
+ /* RING MODE */
+#define RING_REG(base) _MMIO((base) + 0x29c)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+ MMIO_RING_D(RING_MI_MODE);
+ MMIO_RING_D(RING_INSTPM);
+ MMIO_RING_D(RING_TIMESTAMP);
+ MMIO_RING_D(RING_TIMESTAMP_UDW);
+ MMIO_D(GEN7_GT_MODE);
+ MMIO_D(CACHE_MODE_0_GEN7);
+ MMIO_D(CACHE_MODE_1);
+ MMIO_D(CACHE_MODE_0);
+ MMIO_D(_MMIO(0x2124));
+ MMIO_D(_MMIO(0x20dc));
+ MMIO_D(_3D_CHICKEN3);
+ MMIO_D(_MMIO(0x2088));
+ MMIO_D(FF_SLICE_CS_CHICKEN2);
+ MMIO_D(_MMIO(0x2470));
+ MMIO_D(GAM_ECOCHK);
+ MMIO_D(GEN7_COMMON_SLICE_CHICKEN1);
+ MMIO_D(COMMON_SLICE_CHICKEN2);
+ MMIO_D(_MMIO(0x9030));
+ MMIO_D(_MMIO(0x20a0));
+ MMIO_D(_MMIO(0x2420));
+ MMIO_D(_MMIO(0x2430));
+ MMIO_D(_MMIO(0x2434));
+ MMIO_D(_MMIO(0x2438));
+ MMIO_D(_MMIO(0x243c));
+ MMIO_D(_MMIO(0x7018));
+ MMIO_D(HALF_SLICE_CHICKEN3);
+ MMIO_D(GEN7_HALF_SLICE_CHICKEN1);
+ /* display */
+ MMIO_F(_MMIO(0x60220), 0x20);
+ MMIO_D(_MMIO(0x602a0));
+ MMIO_D(_MMIO(0x65050));
+ MMIO_D(_MMIO(0x650b4));
+ MMIO_D(_MMIO(0xc4040));
+ MMIO_D(DERRMR);
+ MMIO_D(PIPEDSL(PIPE_A));
+ MMIO_D(PIPEDSL(PIPE_B));
+ MMIO_D(PIPEDSL(PIPE_C));
+ MMIO_D(PIPEDSL(_PIPE_EDP));
+ MMIO_D(PIPECONF(PIPE_A));
+ MMIO_D(PIPECONF(PIPE_B));
+ MMIO_D(PIPECONF(PIPE_C));
+ MMIO_D(PIPECONF(_PIPE_EDP));
+ MMIO_D(PIPESTAT(PIPE_A));
+ MMIO_D(PIPESTAT(PIPE_B));
+ MMIO_D(PIPESTAT(PIPE_C));
+ MMIO_D(PIPESTAT(_PIPE_EDP));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP));
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A));
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B));
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C));
+ MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP));
+ MMIO_D(CURCNTR(PIPE_A));
+ MMIO_D(CURCNTR(PIPE_B));
+ MMIO_D(CURCNTR(PIPE_C));
+ MMIO_D(CURPOS(PIPE_A));
+ MMIO_D(CURPOS(PIPE_B));
+ MMIO_D(CURPOS(PIPE_C));
+ MMIO_D(CURBASE(PIPE_A));
+ MMIO_D(CURBASE(PIPE_B));
+ MMIO_D(CURBASE(PIPE_C));
+ MMIO_D(CUR_FBC_CTL(PIPE_A));
+ MMIO_D(CUR_FBC_CTL(PIPE_B));
+ MMIO_D(CUR_FBC_CTL(PIPE_C));
+ MMIO_D(_MMIO(0x700ac));
+ MMIO_D(_MMIO(0x710ac));
+ MMIO_D(_MMIO(0x720ac));
+ MMIO_D(_MMIO(0x70090));
+ MMIO_D(_MMIO(0x70094));
+ MMIO_D(_MMIO(0x70098));
+ MMIO_D(_MMIO(0x7009c));
+ MMIO_D(DSPCNTR(PIPE_A));
+ MMIO_D(DSPADDR(PIPE_A));
+ MMIO_D(DSPSTRIDE(PIPE_A));
+ MMIO_D(DSPPOS(PIPE_A));
+ MMIO_D(DSPSIZE(PIPE_A));
+ MMIO_D(DSPSURF(PIPE_A));
+ MMIO_D(DSPOFFSET(PIPE_A));
+ MMIO_D(DSPSURFLIVE(PIPE_A));
+ MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
+ MMIO_D(DSPCNTR(PIPE_B));
+ MMIO_D(DSPADDR(PIPE_B));
+ MMIO_D(DSPSTRIDE(PIPE_B));
+ MMIO_D(DSPPOS(PIPE_B));
+ MMIO_D(DSPSIZE(PIPE_B));
+ MMIO_D(DSPSURF(PIPE_B));
+ MMIO_D(DSPOFFSET(PIPE_B));
+ MMIO_D(DSPSURFLIVE(PIPE_B));
+ MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
+ MMIO_D(DSPCNTR(PIPE_C));
+ MMIO_D(DSPADDR(PIPE_C));
+ MMIO_D(DSPSTRIDE(PIPE_C));
+ MMIO_D(DSPPOS(PIPE_C));
+ MMIO_D(DSPSIZE(PIPE_C));
+ MMIO_D(DSPSURF(PIPE_C));
+ MMIO_D(DSPOFFSET(PIPE_C));
+ MMIO_D(DSPSURFLIVE(PIPE_C));
+ MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
+ MMIO_D(SPRCTL(PIPE_A));
+ MMIO_D(SPRLINOFF(PIPE_A));
+ MMIO_D(SPRSTRIDE(PIPE_A));
+ MMIO_D(SPRPOS(PIPE_A));
+ MMIO_D(SPRSIZE(PIPE_A));
+ MMIO_D(SPRKEYVAL(PIPE_A));
+ MMIO_D(SPRKEYMSK(PIPE_A));
+ MMIO_D(SPRSURF(PIPE_A));
+ MMIO_D(SPRKEYMAX(PIPE_A));
+ MMIO_D(SPROFFSET(PIPE_A));
+ MMIO_D(SPRSCALE(PIPE_A));
+ MMIO_D(SPRSURFLIVE(PIPE_A));
+ MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0));
+ MMIO_D(SPRCTL(PIPE_B));
+ MMIO_D(SPRLINOFF(PIPE_B));
+ MMIO_D(SPRSTRIDE(PIPE_B));
+ MMIO_D(SPRPOS(PIPE_B));
+ MMIO_D(SPRSIZE(PIPE_B));
+ MMIO_D(SPRKEYVAL(PIPE_B));
+ MMIO_D(SPRKEYMSK(PIPE_B));
+ MMIO_D(SPRSURF(PIPE_B));
+ MMIO_D(SPRKEYMAX(PIPE_B));
+ MMIO_D(SPROFFSET(PIPE_B));
+ MMIO_D(SPRSCALE(PIPE_B));
+ MMIO_D(SPRSURFLIVE(PIPE_B));
+ MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0));
+ MMIO_D(SPRCTL(PIPE_C));
+ MMIO_D(SPRLINOFF(PIPE_C));
+ MMIO_D(SPRSTRIDE(PIPE_C));
+ MMIO_D(SPRPOS(PIPE_C));
+ MMIO_D(SPRSIZE(PIPE_C));
+ MMIO_D(SPRKEYVAL(PIPE_C));
+ MMIO_D(SPRKEYMSK(PIPE_C));
+ MMIO_D(SPRSURF(PIPE_C));
+ MMIO_D(SPRKEYMAX(PIPE_C));
+ MMIO_D(SPROFFSET(PIPE_C));
+ MMIO_D(SPRSCALE(PIPE_C));
+ MMIO_D(SPRSURFLIVE(PIPE_C));
+ MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
+ MMIO_D(HTOTAL(TRANSCODER_A));
+ MMIO_D(HBLANK(TRANSCODER_A));
+ MMIO_D(HSYNC(TRANSCODER_A));
+ MMIO_D(VTOTAL(TRANSCODER_A));
+ MMIO_D(VBLANK(TRANSCODER_A));
+ MMIO_D(VSYNC(TRANSCODER_A));
+ MMIO_D(BCLRPAT(TRANSCODER_A));
+ MMIO_D(VSYNCSHIFT(TRANSCODER_A));
+ MMIO_D(PIPESRC(TRANSCODER_A));
+ MMIO_D(HTOTAL(TRANSCODER_B));
+ MMIO_D(HBLANK(TRANSCODER_B));
+ MMIO_D(HSYNC(TRANSCODER_B));
+ MMIO_D(VTOTAL(TRANSCODER_B));
+ MMIO_D(VBLANK(TRANSCODER_B));
+ MMIO_D(VSYNC(TRANSCODER_B));
+ MMIO_D(BCLRPAT(TRANSCODER_B));
+ MMIO_D(VSYNCSHIFT(TRANSCODER_B));
+ MMIO_D(PIPESRC(TRANSCODER_B));
+ MMIO_D(HTOTAL(TRANSCODER_C));
+ MMIO_D(HBLANK(TRANSCODER_C));
+ MMIO_D(HSYNC(TRANSCODER_C));
+ MMIO_D(VTOTAL(TRANSCODER_C));
+ MMIO_D(VBLANK(TRANSCODER_C));
+ MMIO_D(VSYNC(TRANSCODER_C));
+ MMIO_D(BCLRPAT(TRANSCODER_C));
+ MMIO_D(VSYNCSHIFT(TRANSCODER_C));
+ MMIO_D(PIPESRC(TRANSCODER_C));
+ MMIO_D(HTOTAL(TRANSCODER_EDP));
+ MMIO_D(HBLANK(TRANSCODER_EDP));
+ MMIO_D(HSYNC(TRANSCODER_EDP));
+ MMIO_D(VTOTAL(TRANSCODER_EDP));
+ MMIO_D(VBLANK(TRANSCODER_EDP));
+ MMIO_D(VSYNC(TRANSCODER_EDP));
+ MMIO_D(BCLRPAT(TRANSCODER_EDP));
+ MMIO_D(VSYNCSHIFT(TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP));
+ MMIO_D(PF_CTL(PIPE_A));
+ MMIO_D(PF_WIN_SZ(PIPE_A));
+ MMIO_D(PF_WIN_POS(PIPE_A));
+ MMIO_D(PF_VSCALE(PIPE_A));
+ MMIO_D(PF_HSCALE(PIPE_A));
+ MMIO_D(PF_CTL(PIPE_B));
+ MMIO_D(PF_WIN_SZ(PIPE_B));
+ MMIO_D(PF_WIN_POS(PIPE_B));
+ MMIO_D(PF_VSCALE(PIPE_B));
+ MMIO_D(PF_HSCALE(PIPE_B));
+ MMIO_D(PF_CTL(PIPE_C));
+ MMIO_D(PF_WIN_SZ(PIPE_C));
+ MMIO_D(PF_WIN_POS(PIPE_C));
+ MMIO_D(PF_VSCALE(PIPE_C));
+ MMIO_D(PF_HSCALE(PIPE_C));
+ MMIO_D(WM0_PIPE_ILK(PIPE_A));
+ MMIO_D(WM0_PIPE_ILK(PIPE_B));
+ MMIO_D(WM0_PIPE_ILK(PIPE_C));
+ MMIO_D(WM1_LP_ILK);
+ MMIO_D(WM2_LP_ILK);
+ MMIO_D(WM3_LP_ILK);
+ MMIO_D(WM1S_LP_ILK);
+ MMIO_D(WM2S_LP_IVB);
+ MMIO_D(WM3S_LP_IVB);
+ MMIO_D(BLC_PWM_CPU_CTL2);
+ MMIO_D(BLC_PWM_CPU_CTL);
+ MMIO_D(BLC_PWM_PCH_CTL1);
+ MMIO_D(BLC_PWM_PCH_CTL2);
+ MMIO_D(_MMIO(0x48268));
+ MMIO_F(PCH_GMBUS0, 4 * 4);
+ MMIO_F(PCH_GPIO_BASE, 6 * 4);
+ MMIO_F(_MMIO(0xe4f00), 0x28);
+ MMIO_D(_MMIO(_PCH_TRANSACONF));
+ MMIO_D(_MMIO(_PCH_TRANSBCONF));
+ MMIO_D(FDI_RX_IIR(PIPE_A));
+ MMIO_D(FDI_RX_IIR(PIPE_B));
+ MMIO_D(FDI_RX_IIR(PIPE_C));
+ MMIO_D(FDI_RX_IMR(PIPE_A));
+ MMIO_D(FDI_RX_IMR(PIPE_B));
+ MMIO_D(FDI_RX_IMR(PIPE_C));
+ MMIO_D(FDI_RX_CTL(PIPE_A));
+ MMIO_D(FDI_RX_CTL(PIPE_B));
+ MMIO_D(FDI_RX_CTL(PIPE_C));
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A));
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A));
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A));
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A));
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A));
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A));
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A));
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B));
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B));
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B));
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B));
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B));
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B));
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B));
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1));
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1));
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2));
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2));
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1));
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1));
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2));
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2));
+ MMIO_D(TRANS_DP_CTL(PIPE_A));
+ MMIO_D(TRANS_DP_CTL(PIPE_B));
+ MMIO_D(TRANS_DP_CTL(PIPE_C));
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_A));
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_A));
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_A));
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_B));
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_B));
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_B));
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_C));
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_C));
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_C));
+ MMIO_D(_MMIO(_FDI_RXA_MISC));
+ MMIO_D(_MMIO(_FDI_RXB_MISC));
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE1));
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE2));
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE1));
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE2));
+ MMIO_D(PCH_PP_CONTROL);
+ MMIO_D(PCH_PP_DIVISOR);
+ MMIO_D(PCH_PP_STATUS);
+ MMIO_D(PCH_LVDS);
+ MMIO_D(_MMIO(_PCH_DPLL_A));
+ MMIO_D(_MMIO(_PCH_DPLL_B));
+ MMIO_D(_MMIO(_PCH_FPA0));
+ MMIO_D(_MMIO(_PCH_FPA1));
+ MMIO_D(_MMIO(_PCH_FPB0));
+ MMIO_D(_MMIO(_PCH_FPB1));
+ MMIO_D(PCH_DREF_CONTROL);
+ MMIO_D(PCH_RAWCLK_FREQ);
+ MMIO_D(PCH_DPLL_SEL);
+ MMIO_D(_MMIO(0x61208));
+ MMIO_D(_MMIO(0x6120c));
+ MMIO_D(PCH_PP_ON_DELAYS);
+ MMIO_D(PCH_PP_OFF_DELAYS);
+ MMIO_D(_MMIO(0xe651c));
+ MMIO_D(_MMIO(0xe661c));
+ MMIO_D(_MMIO(0xe671c));
+ MMIO_D(_MMIO(0xe681c));
+ MMIO_D(_MMIO(0xe6c04));
+ MMIO_D(_MMIO(0xe6e1c));
+ MMIO_D(PCH_PORT_HOTPLUG);
+ MMIO_D(LCPLL_CTL);
+ MMIO_D(FUSE_STRAP);
+ MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL);
+ MMIO_D(DISP_ARB_CTL);
+ MMIO_D(DISP_ARB_CTL2);
+ MMIO_D(ILK_DISPLAY_CHICKEN1);
+ MMIO_D(ILK_DISPLAY_CHICKEN2);
+ MMIO_D(ILK_DSPCLK_GATE_D);
+ MMIO_D(SOUTH_CHICKEN1);
+ MMIO_D(SOUTH_CHICKEN2);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN1));
+ MMIO_D(_MMIO(_TRANSB_CHICKEN1));
+ MMIO_D(SOUTH_DSPCLK_GATE_D);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN2));
+ MMIO_D(_MMIO(_TRANSB_CHICKEN2));
+ MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A));
+ MMIO_D(ILK_FBC_RT_BASE);
+ MMIO_D(IPS_CTL);
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A));
+ MMIO_D(PIPE_CSC_MODE(PIPE_A));
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A));
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A));
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A));
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A));
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A));
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B));
+ MMIO_D(PIPE_CSC_MODE(PIPE_B));
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B));
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B));
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B));
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B));
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B));
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C));
+ MMIO_D(PIPE_CSC_MODE(PIPE_C));
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C));
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C));
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C));
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C));
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C));
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C));
+ MMIO_D(PREC_PAL_INDEX(PIPE_A));
+ MMIO_D(PREC_PAL_DATA(PIPE_A));
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3);
+ MMIO_D(PREC_PAL_INDEX(PIPE_B));
+ MMIO_D(PREC_PAL_DATA(PIPE_B));
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3);
+ MMIO_D(PREC_PAL_INDEX(PIPE_C));
+ MMIO_D(PREC_PAL_DATA(PIPE_C));
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3);
+ MMIO_D(_MMIO(0x60110));
+ MMIO_D(_MMIO(0x61110));
+ MMIO_F(_MMIO(0x70400), 0x40);
+ MMIO_F(_MMIO(0x71400), 0x40);
+ MMIO_F(_MMIO(0x72400), 0x40);
+ MMIO_D(WM_LINETIME(PIPE_A));
+ MMIO_D(WM_LINETIME(PIPE_B));
+ MMIO_D(WM_LINETIME(PIPE_C));
+ MMIO_D(SPLL_CTL);
+ MMIO_D(_MMIO(_WRPLL_CTL1));
+ MMIO_D(_MMIO(_WRPLL_CTL2));
+ MMIO_D(PORT_CLK_SEL(PORT_A));
+ MMIO_D(PORT_CLK_SEL(PORT_B));
+ MMIO_D(PORT_CLK_SEL(PORT_C));
+ MMIO_D(PORT_CLK_SEL(PORT_D));
+ MMIO_D(PORT_CLK_SEL(PORT_E));
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_A));
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_B));
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_C));
+ MMIO_D(HSW_NDE_RSTWRN_OPT);
+ MMIO_D(_MMIO(0x46508));
+ MMIO_D(_MMIO(0x49080));
+ MMIO_D(_MMIO(0x49180));
+ MMIO_D(_MMIO(0x49280));
+ MMIO_F(_MMIO(0x49090), 0x14);
+ MMIO_F(_MMIO(0x49190), 0x14);
+ MMIO_F(_MMIO(0x49290), 0x14);
+ MMIO_D(GAMMA_MODE(PIPE_A));
+ MMIO_D(GAMMA_MODE(PIPE_B));
+ MMIO_D(GAMMA_MODE(PIPE_C));
+ MMIO_D(PIPE_MULT(PIPE_A));
+ MMIO_D(PIPE_MULT(PIPE_B));
+ MMIO_D(PIPE_MULT(PIPE_C));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C));
+ MMIO_D(SFUSE_STRAP);
+ MMIO_D(SBI_ADDR);
+ MMIO_D(SBI_DATA);
+ MMIO_D(SBI_CTL_STAT);
+ MMIO_D(PIXCLK_GATE);
+ MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4);
+ MMIO_D(DDI_BUF_CTL(PORT_A));
+ MMIO_D(DDI_BUF_CTL(PORT_B));
+ MMIO_D(DDI_BUF_CTL(PORT_C));
+ MMIO_D(DDI_BUF_CTL(PORT_D));
+ MMIO_D(DDI_BUF_CTL(PORT_E));
+ MMIO_D(DP_TP_CTL(PORT_A));
+ MMIO_D(DP_TP_CTL(PORT_B));
+ MMIO_D(DP_TP_CTL(PORT_C));
+ MMIO_D(DP_TP_CTL(PORT_D));
+ MMIO_D(DP_TP_CTL(PORT_E));
+ MMIO_D(DP_TP_STATUS(PORT_A));
+ MMIO_D(DP_TP_STATUS(PORT_B));
+ MMIO_D(DP_TP_STATUS(PORT_C));
+ MMIO_D(DP_TP_STATUS(PORT_D));
+ MMIO_D(DP_TP_STATUS(PORT_E));
+ MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50);
+ MMIO_F(_MMIO(0x64e60), 0x50);
+ MMIO_F(_MMIO(0x64eC0), 0x50);
+ MMIO_F(_MMIO(0x64f20), 0x50);
+ MMIO_F(_MMIO(0x64f80), 0x50);
+ MMIO_D(HSW_AUD_CFG(PIPE_A));
+ MMIO_D(HSW_AUD_PIN_ELD_CP_VLD);
+ MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A));
+ MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_A));
+ MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_B));
+ MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_C));
+ MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_EDP));
+ MMIO_D(_MMIO(_TRANSA_MSA_MISC));
+ MMIO_D(_MMIO(_TRANSB_MSA_MISC));
+ MMIO_D(_MMIO(_TRANSC_MSA_MISC));
+ MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC));
+ MMIO_D(FORCEWAKE);
+ MMIO_D(FORCEWAKE_ACK);
+ MMIO_D(GEN6_GT_CORE_STATUS);
+ MMIO_D(GEN6_GT_THREAD_STATUS_REG);
+ MMIO_D(GTFIFODBG);
+ MMIO_D(GTFIFOCTL);
+ MMIO_D(ECOBUS);
+ MMIO_D(GEN6_RC_CONTROL);
+ MMIO_D(GEN6_RC_STATE);
+ MMIO_D(GEN6_RPNSWREQ);
+ MMIO_D(GEN6_RC_VIDEO_FREQ);
+ MMIO_D(GEN6_RP_DOWN_TIMEOUT);
+ MMIO_D(GEN6_RP_INTERRUPT_LIMITS);
+ MMIO_D(GEN6_RPSTAT1);
+ MMIO_D(GEN6_RP_CONTROL);
+ MMIO_D(GEN6_RP_UP_THRESHOLD);
+ MMIO_D(GEN6_RP_DOWN_THRESHOLD);
+ MMIO_D(GEN6_RP_CUR_UP_EI);
+ MMIO_D(GEN6_RP_CUR_UP);
+ MMIO_D(GEN6_RP_PREV_UP);
+ MMIO_D(GEN6_RP_CUR_DOWN_EI);
+ MMIO_D(GEN6_RP_CUR_DOWN);
+ MMIO_D(GEN6_RP_PREV_DOWN);
+ MMIO_D(GEN6_RP_UP_EI);
+ MMIO_D(GEN6_RP_DOWN_EI);
+ MMIO_D(GEN6_RP_IDLE_HYSTERSIS);
+ MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT);
+ MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT);
+ MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT);
+ MMIO_D(GEN6_RC_EVALUATION_INTERVAL);
+ MMIO_D(GEN6_RC_IDLE_HYSTERSIS);
+ MMIO_D(GEN6_RC_SLEEP);
+ MMIO_D(GEN6_RC1e_THRESHOLD);
+ MMIO_D(GEN6_RC6_THRESHOLD);
+ MMIO_D(GEN6_RC6p_THRESHOLD);
+ MMIO_D(GEN6_RC6pp_THRESHOLD);
+ MMIO_D(GEN6_PMINTRMSK);
+
+ MMIO_D(RSTDBYCTL);
+ MMIO_D(GEN6_GDRST);
+ MMIO_F(FENCE_REG_GEN6_LO(0), 0x80);
+ MMIO_D(CPU_VGACNTRL);
+ MMIO_D(TILECTL);
+ MMIO_D(GEN6_UCGCTL1);
+ MMIO_D(GEN6_UCGCTL2);
+ MMIO_F(_MMIO(0x4f000), 0x90);
+ MMIO_D(GEN6_PCODE_DATA);
+ MMIO_D(_MMIO(0x13812c));
+ MMIO_D(GEN7_ERR_INT);
+ MMIO_D(HSW_EDRAM_CAP);
+ MMIO_D(HSW_IDICR);
+ MMIO_D(GFX_FLSH_CNTL_GEN6);
+ MMIO_D(_MMIO(0x3c));
+ MMIO_D(_MMIO(0x860));
+ MMIO_D(ECOSKPD(RENDER_RING_BASE));
+ MMIO_D(_MMIO(0x121d0));
+ MMIO_D(ECOSKPD(BLT_RING_BASE));
+ MMIO_D(_MMIO(0x41d0));
+ MMIO_D(GAC_ECO_BITS);
+ MMIO_D(_MMIO(0x6200));
+ MMIO_D(_MMIO(0x6204));
+ MMIO_D(_MMIO(0x6208));
+ MMIO_D(_MMIO(0x7118));
+ MMIO_D(_MMIO(0x7180));
+ MMIO_D(_MMIO(0x7408));
+ MMIO_D(_MMIO(0x7c00));
+ MMIO_D(GEN6_MBCTL);
+ MMIO_D(_MMIO(0x911c));
+ MMIO_D(_MMIO(0x9120));
+ MMIO_D(GEN7_UCGCTL4);
+ MMIO_D(GAB_CTL);
+ MMIO_D(_MMIO(0x48800));
+ MMIO_D(_MMIO(0xce044));
+ MMIO_D(_MMIO(0xe6500));
+ MMIO_D(_MMIO(0xe6504));
+ MMIO_D(_MMIO(0xe6600));
+ MMIO_D(_MMIO(0xe6604));
+ MMIO_D(_MMIO(0xe6700));
+ MMIO_D(_MMIO(0xe6704));
+ MMIO_D(_MMIO(0xe6800));
+ MMIO_D(_MMIO(0xe6804));
+ MMIO_D(PCH_GMBUS4);
+ MMIO_D(PCH_GMBUS5);
+ MMIO_D(_MMIO(0x902c));
+ MMIO_D(_MMIO(0xec008));
+ MMIO_D(_MMIO(0xec00c));
+ MMIO_D(_MMIO(0xec008 + 0x18));
+ MMIO_D(_MMIO(0xec00c + 0x18));
+ MMIO_D(_MMIO(0xec008 + 0x18 * 2));
+ MMIO_D(_MMIO(0xec00c + 0x18 * 2));
+ MMIO_D(_MMIO(0xec008 + 0x18 * 3));
+ MMIO_D(_MMIO(0xec00c + 0x18 * 3));
+ MMIO_D(_MMIO(0xec408));
+ MMIO_D(_MMIO(0xec40c));
+ MMIO_D(_MMIO(0xec408 + 0x18));
+ MMIO_D(_MMIO(0xec40c + 0x18));
+ MMIO_D(_MMIO(0xec408 + 0x18 * 2));
+ MMIO_D(_MMIO(0xec40c + 0x18 * 2));
+ MMIO_D(_MMIO(0xec408 + 0x18 * 3));
+ MMIO_D(_MMIO(0xec40c + 0x18 * 3));
+ MMIO_D(_MMIO(0xfc810));
+ MMIO_D(_MMIO(0xfc81c));
+ MMIO_D(_MMIO(0xfc828));
+ MMIO_D(_MMIO(0xfc834));
+ MMIO_D(_MMIO(0xfcc00));
+ MMIO_D(_MMIO(0xfcc0c));
+ MMIO_D(_MMIO(0xfcc18));
+ MMIO_D(_MMIO(0xfcc24));
+ MMIO_D(_MMIO(0xfd000));
+ MMIO_D(_MMIO(0xfd00c));
+ MMIO_D(_MMIO(0xfd018));
+ MMIO_D(_MMIO(0xfd024));
+ MMIO_D(_MMIO(0xfd034));
+ MMIO_D(FPGA_DBG);
+ MMIO_D(_MMIO(0x2054));
+ MMIO_D(_MMIO(0x12054));
+ MMIO_D(_MMIO(0x22054));
+ MMIO_D(_MMIO(0x1a054));
+ MMIO_D(_MMIO(0x44070));
+ MMIO_D(_MMIO(0x2178));
+ MMIO_D(_MMIO(0x217c));
+ MMIO_D(_MMIO(0x12178));
+ MMIO_D(_MMIO(0x1217c));
+ MMIO_F(_MMIO(0x5200), 32);
+ MMIO_F(_MMIO(0x5240), 32);
+ MMIO_F(_MMIO(0x5280), 16);
+ MMIO_D(BCS_SWCTRL);
+ MMIO_F(HS_INVOCATION_COUNT, 8);
+ MMIO_F(DS_INVOCATION_COUNT, 8);
+ MMIO_F(IA_VERTICES_COUNT, 8);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8);
+ MMIO_F(VS_INVOCATION_COUNT, 8);
+ MMIO_F(GS_INVOCATION_COUNT, 8);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8);
+ MMIO_F(CL_INVOCATION_COUNT, 8);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8);
+ MMIO_F(PS_INVOCATION_COUNT, 8);
+ MMIO_F(PS_DEPTH_COUNT, 8);
+ MMIO_D(ARB_MODE);
+ MMIO_RING_D(RING_BBADDR);
+ MMIO_D(_MMIO(0x2220));
+ MMIO_D(_MMIO(0x12220));
+ MMIO_D(_MMIO(0x22220));
+ MMIO_RING_D(RING_SYNC_1);
+ MMIO_RING_D(RING_SYNC_0);
+ MMIO_D(GUC_STATUS);
+
+ MMIO_F(_MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000);
+ MMIO_F(_MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE);
+ MMIO_F(LGC_PALETTE(PIPE_A, 0), 1024);
+ MMIO_F(LGC_PALETTE(PIPE_B, 0), 1024);
+ MMIO_F(LGC_PALETTE(PIPE_C, 0), 1024);
+
+ return 0;
+}
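A note for readers: the MMIO_D()/MMIO_F() helpers used throughout these iterate_*_mmio() functions are local macros defined earlier in intel_gvt_mmio_table.c, outside this excerpt. A minimal sketch of their presumed shape, assuming the handle_mmio_cb callback this series adds to struct intel_gvt_mmio_table_iter:

	/* Sketch only, not patch content: MMIO_F() reports a register range
	 * of 's' bytes to the iterator's callback and aborts the walk on
	 * error; MMIO_D() is the common single-dword (4 byte) case. */
	#define MMIO_F(reg, s) do { \
		int ret = iter->handle_mmio_cb(iter, i915_mmio_reg_offset(reg), (s)); \
		if (ret) \
			return ret; \
	} while (0)

	#define MMIO_D(reg) MMIO_F(reg, 4)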
+
+static int iterate_bdw_only_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ MMIO_D(HSW_PWR_WELL_CTL1);
+ MMIO_D(HSW_PWR_WELL_CTL2);
+ MMIO_D(HSW_PWR_WELL_CTL3);
+ MMIO_D(HSW_PWR_WELL_CTL4);
+ MMIO_D(HSW_PWR_WELL_CTL5);
+ MMIO_D(HSW_PWR_WELL_CTL6);
+
+ MMIO_D(WM_MISC);
+ MMIO_D(_MMIO(_SRD_CTL_EDP));
+
+ MMIO_D(_MMIO(0xb1f0));
+ MMIO_D(_MMIO(0xb1c0));
+ MMIO_D(_MMIO(0xb100));
+ MMIO_D(_MMIO(0xb10c));
+ MMIO_D(_MMIO(0xb110));
+ MMIO_D(_MMIO(0x83a4));
+ MMIO_D(_MMIO(0x8430));
+ MMIO_D(_MMIO(0x2248));
+ MMIO_D(FORCEWAKE_ACK_HSW);
+
+ return 0;
+}
+
+static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+
+ MMIO_D(GEN8_GT_IMR(0));
+ MMIO_D(GEN8_GT_IER(0));
+ MMIO_D(GEN8_GT_IIR(0));
+ MMIO_D(GEN8_GT_ISR(0));
+ MMIO_D(GEN8_GT_IMR(1));
+ MMIO_D(GEN8_GT_IER(1));
+ MMIO_D(GEN8_GT_IIR(1));
+ MMIO_D(GEN8_GT_ISR(1));
+ MMIO_D(GEN8_GT_IMR(2));
+ MMIO_D(GEN8_GT_IER(2));
+ MMIO_D(GEN8_GT_IIR(2));
+ MMIO_D(GEN8_GT_ISR(2));
+ MMIO_D(GEN8_GT_IMR(3));
+ MMIO_D(GEN8_GT_IER(3));
+ MMIO_D(GEN8_GT_IIR(3));
+ MMIO_D(GEN8_GT_ISR(3));
+ MMIO_D(GEN8_DE_PIPE_IMR(PIPE_A));
+ MMIO_D(GEN8_DE_PIPE_IER(PIPE_A));
+ MMIO_D(GEN8_DE_PIPE_IIR(PIPE_A));
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A));
+ MMIO_D(GEN8_DE_PIPE_IMR(PIPE_B));
+ MMIO_D(GEN8_DE_PIPE_IER(PIPE_B));
+ MMIO_D(GEN8_DE_PIPE_IIR(PIPE_B));
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B));
+ MMIO_D(GEN8_DE_PIPE_IMR(PIPE_C));
+ MMIO_D(GEN8_DE_PIPE_IER(PIPE_C));
+ MMIO_D(GEN8_DE_PIPE_IIR(PIPE_C));
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C));
+ MMIO_D(GEN8_DE_PORT_IMR);
+ MMIO_D(GEN8_DE_PORT_IER);
+ MMIO_D(GEN8_DE_PORT_IIR);
+ MMIO_D(GEN8_DE_PORT_ISR);
+ MMIO_D(GEN8_DE_MISC_IMR);
+ MMIO_D(GEN8_DE_MISC_IER);
+ MMIO_D(GEN8_DE_MISC_IIR);
+ MMIO_D(GEN8_DE_MISC_ISR);
+ MMIO_D(GEN8_PCU_IMR);
+ MMIO_D(GEN8_PCU_IER);
+ MMIO_D(GEN8_PCU_IIR);
+ MMIO_D(GEN8_PCU_ISR);
+ MMIO_D(GEN8_MASTER_IRQ);
+ MMIO_RING_D(RING_ACTHD_UDW);
+
+#define RING_REG(base) _MMIO((base) + 0xd0)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x230)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x234)
+ MMIO_RING_F(RING_REG, 8);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x244)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x370)
+ MMIO_RING_F(RING_REG, 48);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x3a0)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+ MMIO_D(PIPEMISC(PIPE_A));
+ MMIO_D(PIPEMISC(PIPE_B));
+ MMIO_D(PIPEMISC(PIPE_C));
+ MMIO_D(_MMIO(0x1c1d0));
+ MMIO_D(GEN6_MBCUNIT_SNPCR);
+ MMIO_D(GEN7_MISCCPCTL);
+ MMIO_D(_MMIO(0x1c054));
+ MMIO_D(GEN6_PCODE_MAILBOX);
+ if (!IS_BROXTON(dev_priv))
+ MMIO_D(GEN8_PRIVATE_PAT_LO);
+ MMIO_D(GEN8_PRIVATE_PAT_HI);
+ MMIO_D(GAMTARBMODE);
+
+#define RING_REG(base) _MMIO((base) + 0x270)
+ MMIO_RING_F(RING_REG, 32);
+#undef RING_REG
+
+ MMIO_RING_D(RING_HWS_PGA);
+ MMIO_D(HDC_CHICKEN0);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A));
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B));
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C));
+ MMIO_D(_MMIO(0x6671c));
+ MMIO_D(_MMIO(0x66c00));
+ MMIO_D(_MMIO(0x66c04));
+ MMIO_D(HSW_GTT_CACHE_EN);
+ MMIO_D(GEN8_EU_DISABLE0);
+ MMIO_D(GEN8_EU_DISABLE1);
+ MMIO_D(GEN8_EU_DISABLE2);
+ MMIO_D(_MMIO(0xfdc));
+ MMIO_D(GEN8_ROW_CHICKEN);
+ MMIO_D(GEN7_ROW_CHICKEN2);
+ MMIO_D(GEN8_UCGCTL6);
+ MMIO_D(GEN8_L3SQCREG4);
+ MMIO_D(GEN9_SCRATCH_LNCF1);
+ MMIO_F(_MMIO(0x24d0), 48);
+ MMIO_D(_MMIO(0x44484));
+ MMIO_D(_MMIO(0x4448c));
+ MMIO_D(GEN8_L3_LRA_1_GPGPU);
+ MMIO_D(_MMIO(0x110000));
+ MMIO_D(_MMIO(0x48400));
+ MMIO_D(_MMIO(0x6e570));
+ MMIO_D(_MMIO(0x65f10));
+ MMIO_D(_MMIO(0xe194));
+ MMIO_D(_MMIO(0xe188));
+ MMIO_D(HALF_SLICE_CHICKEN2);
+ MMIO_D(_MMIO(0x2580));
+ MMIO_D(_MMIO(0xe220));
+ MMIO_D(_MMIO(0xe230));
+ MMIO_D(_MMIO(0xe240));
+ MMIO_D(_MMIO(0xe260));
+ MMIO_D(_MMIO(0xe270));
+ MMIO_D(_MMIO(0xe280));
+ MMIO_D(_MMIO(0xe2a0));
+ MMIO_D(_MMIO(0xe2b0));
+ MMIO_D(_MMIO(0xe2c0));
+ MMIO_D(_MMIO(0x21f0));
+ MMIO_D(GEN8_GAMW_ECO_DEV_RW_IA);
+ MMIO_D(_MMIO(0x215c));
+ MMIO_F(_MMIO(0x2290), 8);
+ MMIO_D(_MMIO(0x2b00));
+ MMIO_D(_MMIO(0x2360));
+ MMIO_D(_MMIO(0x1c17c));
+ MMIO_D(_MMIO(0x1c178));
+ MMIO_D(_MMIO(0x4260));
+ MMIO_D(_MMIO(0x4264));
+ MMIO_D(_MMIO(0x4268));
+ MMIO_D(_MMIO(0x426c));
+ MMIO_D(_MMIO(0x4270));
+ MMIO_D(_MMIO(0x4094));
+ MMIO_D(_MMIO(0x22178));
+ MMIO_D(_MMIO(0x1a178));
+ MMIO_D(_MMIO(0x1a17c));
+ MMIO_D(_MMIO(0x2217c));
+ MMIO_D(EDP_PSR_IMR);
+ MMIO_D(EDP_PSR_IIR);
+ MMIO_D(_MMIO(0xe4cc));
+ MMIO_D(GEN7_SC_INSTDONE);
+
+ return 0;
+}
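The #define/#undef RING_REG pairs above are a stock i915 idiom for per-engine registers that exist only as "engine MMIO base + fixed offset" and have no named accessor; MMIO_RING_D()/MMIO_RING_F() then apply such a macro to every engine. A hedged sketch of the presumed expansion:

	/* Sketch, assuming MMIO_RING_F() walks all engines of the primary GT
	 * and feeds each engine's instance of the register to MMIO_F(). */
	#define MMIO_RING_F(prefix, s) do { \
		struct intel_engine_cs *engine; \
		enum intel_engine_id id; \
		for_each_engine(engine, to_gt(iter->i915), id) \
			MMIO_F(prefix(engine->mmio_base), (s)); \
	} while (0)

	#define MMIO_RING_D(prefix) MMIO_RING_F(prefix, 4)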
+
+static int iterate_pre_skl_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ MMIO_D(FORCEWAKE_MT);
+
+ MMIO_D(PCH_ADPA);
+ MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4);
+ MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4);
+ MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4);
+
+ MMIO_F(_MMIO(0x70440), 0xc);
+ MMIO_F(_MMIO(0x71440), 0xc);
+ MMIO_F(_MMIO(0x72440), 0xc);
+ MMIO_F(_MMIO(0x7044c), 0xc);
+ MMIO_F(_MMIO(0x7144c), 0xc);
+ MMIO_F(_MMIO(0x7244c), 0xc);
+
+ return 0;
+}
+
+static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+
+ MMIO_D(FORCEWAKE_RENDER_GEN9);
+ MMIO_D(FORCEWAKE_ACK_RENDER_GEN9);
+ MMIO_D(FORCEWAKE_GT_GEN9);
+ MMIO_D(FORCEWAKE_ACK_GT_GEN9);
+ MMIO_D(FORCEWAKE_MEDIA_GEN9);
+ MMIO_D(FORCEWAKE_ACK_MEDIA_GEN9);
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4);
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4);
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4);
+ MMIO_D(HSW_PWR_WELL_CTL1);
+ MMIO_D(HSW_PWR_WELL_CTL2);
+ MMIO_D(DBUF_CTL_S(0));
+ MMIO_D(GEN9_PG_ENABLE);
+ MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS);
+ MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS);
+ MMIO_D(GEN9_GAMT_ECO_REG_RW_IA);
+ MMIO_D(MMCD_MISC_CTRL);
+ MMIO_D(CHICKEN_PAR1_1);
+ MMIO_D(DC_STATE_EN);
+ MMIO_D(DC_STATE_DEBUG);
+ MMIO_D(CDCLK_CTL);
+ MMIO_D(LCPLL1_CTL);
+ MMIO_D(LCPLL2_CTL);
+ MMIO_D(_MMIO(_DPLL1_CFGCR1));
+ MMIO_D(_MMIO(_DPLL2_CFGCR1));
+ MMIO_D(_MMIO(_DPLL3_CFGCR1));
+ MMIO_D(_MMIO(_DPLL1_CFGCR2));
+ MMIO_D(_MMIO(_DPLL2_CFGCR2));
+ MMIO_D(_MMIO(_DPLL3_CFGCR2));
+ MMIO_D(DPLL_CTRL1);
+ MMIO_D(DPLL_CTRL2);
+ MMIO_D(DPLL_STATUS);
+ MMIO_D(SKL_PS_WIN_POS(PIPE_A, 0));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_A, 1));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_B, 0));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_B, 1));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_C, 0));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_C, 1));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 0));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 1));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 0));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 1));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 0));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 1));
+ MMIO_D(SKL_PS_CTRL(PIPE_A, 0));
+ MMIO_D(SKL_PS_CTRL(PIPE_A, 1));
+ MMIO_D(SKL_PS_CTRL(PIPE_B, 0));
+ MMIO_D(SKL_PS_CTRL(PIPE_B, 1));
+ MMIO_D(SKL_PS_CTRL(PIPE_C, 0));
+ MMIO_D(SKL_PS_CTRL(PIPE_C, 1));
+ MMIO_D(PLANE_BUF_CFG(PIPE_A, 0));
+ MMIO_D(PLANE_BUF_CFG(PIPE_A, 1));
+ MMIO_D(PLANE_BUF_CFG(PIPE_A, 2));
+ MMIO_D(PLANE_BUF_CFG(PIPE_A, 3));
+ MMIO_D(PLANE_BUF_CFG(PIPE_B, 0));
+ MMIO_D(PLANE_BUF_CFG(PIPE_B, 1));
+ MMIO_D(PLANE_BUF_CFG(PIPE_B, 2));
+ MMIO_D(PLANE_BUF_CFG(PIPE_B, 3));
+ MMIO_D(PLANE_BUF_CFG(PIPE_C, 0));
+ MMIO_D(PLANE_BUF_CFG(PIPE_C, 1));
+ MMIO_D(PLANE_BUF_CFG(PIPE_C, 2));
+ MMIO_D(PLANE_BUF_CFG(PIPE_C, 3));
+ MMIO_D(CUR_BUF_CFG(PIPE_A));
+ MMIO_D(CUR_BUF_CFG(PIPE_B));
+ MMIO_D(CUR_BUF_CFG(PIPE_C));
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8);
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8);
+ MMIO_D(PLANE_WM_TRANS(PIPE_A, 0));
+ MMIO_D(PLANE_WM_TRANS(PIPE_A, 1));
+ MMIO_D(PLANE_WM_TRANS(PIPE_A, 2));
+ MMIO_D(PLANE_WM_TRANS(PIPE_B, 0));
+ MMIO_D(PLANE_WM_TRANS(PIPE_B, 1));
+ MMIO_D(PLANE_WM_TRANS(PIPE_B, 2));
+ MMIO_D(PLANE_WM_TRANS(PIPE_C, 0));
+ MMIO_D(PLANE_WM_TRANS(PIPE_C, 1));
+ MMIO_D(PLANE_WM_TRANS(PIPE_C, 2));
+ MMIO_D(CUR_WM_TRANS(PIPE_A));
+ MMIO_D(CUR_WM_TRANS(PIPE_B));
+ MMIO_D(CUR_WM_TRANS(PIPE_C));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 0));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 1));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 2));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 3));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 0));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 1));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 2));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 3));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 0));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 1));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 2));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 3));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_A, 1)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_A, 2)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_A, 3)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_A, 4)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_B, 1)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_B, 2)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_B, 3)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_B, 4)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_C, 1)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_C, 2)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_C, 3)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_C, 4)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_A, 1)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_A, 2)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_A, 3)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_A, 4)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_B, 1)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_B, 2)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_B, 3)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_B, 4)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_C, 1)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_C, 2)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_C, 3)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_C, 4)));
+ MMIO_D(_MMIO(_PLANE_CTL_3_A));
+ MMIO_D(_MMIO(_PLANE_CTL_3_B));
+ MMIO_D(_MMIO(0x72380));
+ MMIO_D(_MMIO(0x7239c));
+ MMIO_D(_MMIO(_PLANE_SURF_3_A));
+ MMIO_D(_MMIO(_PLANE_SURF_3_B));
+ MMIO_D(DMC_SSP_BASE);
+ MMIO_D(DMC_HTP_SKL);
+ MMIO_D(DMC_LAST_WRITE);
+ MMIO_D(BDW_SCRATCH1);
+ MMIO_D(SKL_DFSM);
+ MMIO_D(DISPIO_CR_TX_BMU_CR0);
+ MMIO_F(GEN9_GFX_MOCS(0), 0x7f8);
+ MMIO_F(GEN7_L3CNTLREG2, 0x80);
+ MMIO_D(RPM_CONFIG0);
+ MMIO_D(_MMIO(0xd08));
+ MMIO_D(RC6_LOCATION);
+ MMIO_D(GEN7_FF_SLICE_CS_CHICKEN1);
+ MMIO_D(GEN9_CS_DEBUG_MODE1);
+ /* TRTT */
+ MMIO_D(TRVATTL3PTRDW(0));
+ MMIO_D(TRVATTL3PTRDW(1));
+ MMIO_D(TRVATTL3PTRDW(2));
+ MMIO_D(TRVATTL3PTRDW(3));
+ MMIO_D(TRVADR);
+ MMIO_D(TRTTE);
+ MMIO_D(_MMIO(0x4dfc));
+ MMIO_D(_MMIO(0x46430));
+ MMIO_D(_MMIO(0x46520));
+ MMIO_D(_MMIO(0xc403c));
+ MMIO_D(GEN8_GARBCNTL);
+ MMIO_D(DMA_CTRL);
+ MMIO_D(_MMIO(0x65900));
+ MMIO_D(GEN6_STOLEN_RESERVED);
+ MMIO_D(_MMIO(0x4068));
+ MMIO_D(_MMIO(0x67054));
+ MMIO_D(_MMIO(0x6e560));
+ MMIO_D(_MMIO(0x6e554));
+ MMIO_D(_MMIO(0x2b20));
+ MMIO_D(_MMIO(0x65f00));
+ MMIO_D(_MMIO(0x65f08));
+ MMIO_D(_MMIO(0x320f0));
+ MMIO_D(_MMIO(0x70034));
+ MMIO_D(_MMIO(0x71034));
+ MMIO_D(_MMIO(0x72034));
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)));
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)));
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)));
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)));
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)));
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)));
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)));
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)));
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)));
+ MMIO_D(_MMIO(0x44500));
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+ MMIO_RING_D(CSFE_CHICKEN1_REG);
+#undef CSFE_CHICKEN1_REG
+ MMIO_D(GEN8_HDC_CHICKEN1);
+ MMIO_D(GEN9_WM_CHICKEN3);
+
+ if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ MMIO_D(GAMT_CHKN_BIT_REG);
+ if (!IS_BROXTON(dev_priv))
+ MMIO_D(GEN9_CTX_PREEMPT_REG);
+ MMIO_F(_MMIO(DMC_MMIO_START_RANGE), 0x3000);
+ return 0;
+}
+
+static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+
+ MMIO_F(_MMIO(0x80000), 0x3000);
+ MMIO_D(GEN7_SAMPLER_INSTDONE);
+ MMIO_D(GEN7_ROW_INSTDONE);
+ MMIO_D(GEN8_FAULT_TLB_DATA0);
+ MMIO_D(GEN8_FAULT_TLB_DATA1);
+ MMIO_D(ERROR_GEN6);
+ MMIO_D(DONE_REG);
+ MMIO_D(EIR);
+ MMIO_D(PGTBL_ER);
+ MMIO_D(_MMIO(0x4194));
+ MMIO_D(_MMIO(0x4294));
+ MMIO_D(_MMIO(0x4494));
+ MMIO_RING_D(RING_PSMI_CTL);
+ MMIO_RING_D(RING_DMA_FADD);
+ MMIO_RING_D(RING_DMA_FADD_UDW);
+ MMIO_RING_D(RING_IPEHR);
+ MMIO_RING_D(RING_INSTPS);
+ MMIO_RING_D(RING_BBADDR_UDW);
+ MMIO_RING_D(RING_BBSTATE);
+ MMIO_RING_D(RING_IPEIR);
+ MMIO_F(SOFT_SCRATCH(0), 16 * 4);
+ MMIO_D(BXT_P_CR_GT_DISP_PWRON);
+ MMIO_D(BXT_RP_STATE_CAP);
+ MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY0));
+ MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY1));
+ MMIO_D(BXT_PHY_CTL(PORT_A));
+ MMIO_D(BXT_PHY_CTL(PORT_B));
+ MMIO_D(BXT_PHY_CTL(PORT_C));
+ MMIO_D(BXT_PORT_PLL_ENABLE(PORT_A));
+ MMIO_D(BXT_PORT_PLL_ENABLE(PORT_B));
+ MMIO_D(BXT_PORT_PLL_ENABLE(PORT_C));
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0));
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0));
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0));
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1));
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1));
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1));
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1));
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10));
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10));
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10));
+ MMIO_D(BXT_DE_PLL_CTL);
+ MMIO_D(BXT_DE_PLL_ENABLE);
+ MMIO_D(BXT_DSI_PLL_CTL);
+ MMIO_D(BXT_DSI_PLL_ENABLE);
+ MMIO_D(GEN9_CLKGATE_DIS_0);
+ MMIO_D(GEN9_CLKGATE_DIS_4);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C));
+ MMIO_D(RC6_CTX_BASE);
+ MMIO_D(GEN8_PUSHBUS_CONTROL);
+ MMIO_D(GEN8_PUSHBUS_ENABLE);
+ MMIO_D(GEN8_PUSHBUS_SHIFT);
+ MMIO_D(GEN6_GFXPAUSE);
+ MMIO_D(GEN8_L3SQCREG1);
+ MMIO_D(GEN8_L3CNTLREG);
+ MMIO_D(_MMIO(0x20D8));
+ MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40);
+ MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40);
+ MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40);
+ MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40);
+ MMIO_D(GEN9_CTX_PREEMPT_REG);
+ MMIO_D(GEN8_PRIVATE_PAT_LO);
+
+ return 0;
+}
+
+/**
+ * intel_gvt_iterate_mmio_table - Iterate the GVT MMIO table
+ * @iter: the iterator
+ *
+ * This function is called to iterate the GVT MMIO table while i915 is
+ * taking a snapshot of the HW state and GVT is building its MMIO
+ * tracking table.
+ */
+int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *i915 = iter->i915;
+ int ret;
+
+ ret = iterate_generic_mmio(iter);
+ if (ret)
+ goto err;
+
+ if (IS_BROADWELL(i915)) {
+ ret = iterate_bdw_only_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_bdw_plus_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_pre_skl_mmio(iter);
+ if (ret)
+ goto err;
+ } else if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
+ ret = iterate_bdw_plus_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_skl_plus_mmio(iter);
+ if (ret)
+ goto err;
+ } else if (IS_BROXTON(i915)) {
+ ret = iterate_bdw_plus_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_skl_plus_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_bxt_mmio(iter);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, I915_GVT);
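The exported iterator lets the separately built GVT module supply a callback instead of duplicating this register table. A minimal caller sketch, assuming the intel_gvt_mmio_table_iter layout this series introduces (an i915 pointer, an opaque data pointer, and handle_mmio_cb):

	/* Count how many MMIO dwords GVT would need to track (sketch). */
	static int count_mmio_cb(struct intel_gvt_mmio_table_iter *iter,
				 u32 offset, u32 size)
	{
		u32 *num_dwords = iter->data;

		*num_dwords += size / 4;	/* size is in bytes */
		return 0;			/* non-zero aborts the walk */
	}

	static u32 count_tracked_mmio(struct drm_i915_private *i915)
	{
		u32 num_dwords = 0;
		struct intel_gvt_mmio_table_iter iter = {
			.i915 = i915,
			.data = &num_dwords,
			.handle_mmio_cb = count_mmio_cb,
		};

		if (intel_gvt_iterate_mmio_table(&iter))
			return 0;
		return num_dwords;
	}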
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 1c841f68169a..e38d2db1c3e3 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -5,6 +5,8 @@
#include <linux/prandom.h>
+#include <uapi/drm/i915_drm.h>
+
#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"
@@ -17,7 +19,7 @@ static const struct {
.class = INTEL_MEMORY_SYSTEM,
.instance = 0,
},
- [INTEL_REGION_LMEM] = {
+ [INTEL_REGION_LMEM_0] = {
.class = INTEL_MEMORY_LOCAL,
.instance = 0,
},
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 21dcbd620758..3d8378c1b447 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -10,7 +10,7 @@
#include <linux/mutex.h>
#include <linux/io-mapping.h>
#include <drm/drm_mm.h>
-#include <drm/i915_drm.h>
+#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
struct drm_i915_gem_object;
@@ -29,14 +29,17 @@ enum intel_memory_type {
enum intel_region_id {
INTEL_REGION_SMEM = 0,
- INTEL_REGION_LMEM,
+ INTEL_REGION_LMEM_0,
+ INTEL_REGION_LMEM_1,
+ INTEL_REGION_LMEM_2,
+ INTEL_REGION_LMEM_3,
INTEL_REGION_STOLEN_SMEM,
INTEL_REGION_STOLEN_LMEM,
INTEL_REGION_UNKNOWN, /* Should be last */
};
#define REGION_SMEM BIT(INTEL_REGION_SMEM)
-#define REGION_LMEM BIT(INTEL_REGION_LMEM)
+#define REGION_LMEM BIT(INTEL_REGION_LMEM_0)
#define REGION_STOLEN_SMEM BIT(INTEL_REGION_STOLEN_SMEM)
#define REGION_STOLEN_LMEM BIT(INTEL_REGION_STOLEN_LMEM)
@@ -54,6 +57,7 @@ struct intel_memory_region_ops {
int (*init_object)(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags);
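With local memory split into INTEL_REGION_LMEM_0..3 for multi-tile parts, only instance 0 keeps the historical REGION_LMEM shorthand; other tiles must be addressed via BIT() on their enum value. A small illustration (the helper is hypothetical, not from this patch):

	/* Hypothetical helper: mask covering the first 'n' LMEM instances,
	 * relying on INTEL_REGION_LMEM_0..3 being consecutive enum values. */
	static inline u32 lmem_instances_mask(int n)
	{
		return GENMASK(INTEL_REGION_LMEM_0 + n - 1, INTEL_REGION_LMEM_0);
	}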
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 4cce044efde2..e2b2bbdc0714 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_pch.h"
/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
@@ -256,7 +257,7 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
} else if (!pch) {
- if (run_as_guest() && HAS_DISPLAY(dev_priv)) {
+ if (i915_run_as_guest() && HAS_DISPLAY(dev_priv)) {
intel_virt_detect_pch(dev_priv, &id, &pch_type);
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 391a37492ce5..ac727546868e 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -136,7 +136,7 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
{
*status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true);
- return *status || ((request & reply_mask) == reply);
+ return (*status == 0) && ((request & reply_mask) == reply);
}
/**
@@ -202,7 +202,7 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
out:
mutex_unlock(&i915->sb_lock);
- return ret ? ret : status;
+ return status ? status : ret;
#undef COND
}
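Both intel_pcode.c hunks are behavioural fixes rather than churn: the old `*status || ...` predicate treated a pcode error as a completed request, and the old `ret ? ret : status` let a wait timeout mask a real pcode error. The corrected convention, as a standalone sketch with illustrative names:

	/* A request is only done when pcode reported success AND the reply
	 * matched; when returning, a pcode error takes priority over the
	 * wait_for() result. */
	static bool pcode_request_done(int status, u32 reply, u32 mask, u32 expected)
	{
		return status == 0 && (reply & mask) == expected;
	}

	static int pcode_request_errno(int status, int wait_ret)
	{
		return status ? status : wait_ret;
	}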
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5167d63010b9..5735915facc5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
*/
#include <linux/module.h>
+#include <linux/string_helpers.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
@@ -56,6 +57,8 @@
#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+static void skl_sagv_disable(struct drm_i915_private *dev_priv);
+
struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
};
@@ -418,8 +421,8 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
- enableddisabled(enable),
- enableddisabled(was_enabled));
+ str_enabled_disabled(enable),
+ str_enabled_disabled(was_enabled));
return was_enabled;
}
@@ -3669,8 +3672,8 @@ intel_has_sagv(struct drm_i915_private *dev_priv)
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
-static void
-skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
+static u32
+intel_sagv_block_time(struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 12) {
u32 val = 0;
@@ -3679,26 +3682,48 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
ret = snb_pcode_read(dev_priv,
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
- if (!ret) {
- dev_priv->sagv_block_time_us = val;
- return;
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "Couldn't read SAGV block time!\n");
+ return 0;
}
- drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
+ return val;
} else if (DISPLAY_VER(dev_priv) == 11) {
- dev_priv->sagv_block_time_us = 10;
- return;
- } else if (DISPLAY_VER(dev_priv) == 10) {
- dev_priv->sagv_block_time_us = 20;
- return;
- } else if (DISPLAY_VER(dev_priv) == 9) {
- dev_priv->sagv_block_time_us = 30;
- return;
+ return 10;
+ } else if (DISPLAY_VER(dev_priv) == 9 && !IS_LP(dev_priv)) {
+ return 30;
} else {
- MISSING_CASE(DISPLAY_VER(dev_priv));
+ return 0;
}
+}
+
+static void intel_sagv_init(struct drm_i915_private *i915)
+{
+ if (!intel_has_sagv(i915))
+ i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
- dev_priv->sagv_block_time_us = 0;
+ /*
+ * Probe to see if we have working SAGV control.
+ * For icl+ this was already determined by intel_bw_init_hw().
+ */
+ if (DISPLAY_VER(i915) < 11)
+ skl_sagv_disable(i915);
+
+ drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN);
+
+ i915->sagv_block_time_us = intel_sagv_block_time(i915);
+
+ drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us);
+
+ /* avoid overflow when summed with wm0 latency/etc. */
+ if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX,
+ "Excessive SAGV block time %u, ignoring\n",
+ i915->sagv_block_time_us))
+ i915->sagv_block_time_us = 0;
+
+ if (!intel_has_sagv(i915))
+ i915->sagv_block_time_us = 0;
}
/*
@@ -3712,16 +3737,15 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
* - All planes can enable watermarks for latencies >= SAGV engine block time
* - We're not using an interlaced display configuration
*/
-static int
-intel_enable_sagv(struct drm_i915_private *dev_priv)
+static void skl_sagv_enable(struct drm_i915_private *dev_priv)
{
int ret;
if (!intel_has_sagv(dev_priv))
- return 0;
+ return;
if (dev_priv->sagv_status == I915_SAGV_ENABLED)
- return 0;
+ return;
drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
@@ -3736,26 +3760,24 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return 0;
+ return;
} else if (ret < 0) {
drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
- return ret;
+ return;
}
dev_priv->sagv_status = I915_SAGV_ENABLED;
- return 0;
}
-static int
-intel_disable_sagv(struct drm_i915_private *dev_priv)
+static void skl_sagv_disable(struct drm_i915_private *dev_priv)
{
int ret;
if (!intel_has_sagv(dev_priv))
- return 0;
+ return;
if (dev_priv->sagv_status == I915_SAGV_DISABLED)
- return 0;
+ return;
drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
@@ -3770,14 +3792,13 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return 0;
+ return;
} else if (ret < 0) {
drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
- return ret;
+ return;
}
dev_priv->sagv_status = I915_SAGV_DISABLED;
- return 0;
}
static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
@@ -3790,7 +3811,7 @@ static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
return;
if (!intel_can_enable_sagv(i915, new_bw_state))
- intel_disable_sagv(i915);
+ skl_sagv_disable(i915);
}
static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
@@ -3803,7 +3824,7 @@ static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
return;
if (intel_can_enable_sagv(i915, new_bw_state))
- intel_enable_sagv(i915);
+ skl_sagv_enable(i915);
}
static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
@@ -4325,46 +4346,31 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv)
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
{
- u32 val, val2;
- u32 fourcc = 0;
+ u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(ddb_y, val);
+ skl_ddb_entry_init_from_hw(ddb, val);
return;
}
- val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));
-
- /* No DDB allocated for disabled planes */
- if (val & PLANE_CTL_ENABLE)
- fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK_SKL,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
-
- if (DISPLAY_VER(dev_priv) >= 11) {
- val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(ddb_y, val);
- } else {
- val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb, val);
- if (fourcc &&
- drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
- swap(val, val2);
+ if (DISPLAY_VER(dev_priv) >= 11)
+ return;
- skl_ddb_entry_init_from_hw(ddb_y, val);
- skl_ddb_entry_init_from_hw(ddb_uv, val2);
- }
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb_y, val);
}
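Note the changed meaning of the two out-parameters: `ddb` now always holds the main allocation read from PLANE_BUF_CFG, while `ddb_y` is only filled from PLANE_NV12_BUF_CFG on pre-icl hardware, where planar formats carry a separate Y-plane allocation. A hypothetical predicate capturing when the extra Y entry is in play, mirroring the DISPLAY_VER/nv12_planes checks used elsewhere in this patch:

	/* Hypothetical, for illustration only. */
	static bool plane_has_separate_y_ddb(struct drm_i915_private *i915,
					     const struct intel_crtc_state *crtc_state,
					     enum plane_id plane_id)
	{
		return DISPLAY_VER(i915) < 11 &&
		       (crtc_state->nv12_planes & BIT(plane_id));
	}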
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv)
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
@@ -4380,8 +4386,8 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
for_each_plane_id_on_crtc(crtc, plane_id)
skl_ddb_get_hw_plane_state(dev_priv, pipe,
plane_id,
- &ddb_y[plane_id],
- &ddb_uv[plane_id]);
+ &ddb[plane_id],
+ &ddb_y[plane_id]);
intel_display_power_put(dev_priv, power_domain, wakeref);
}
@@ -4913,17 +4919,6 @@ static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool
}
static bool
-use_min_ddb(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- return DISPLAY_VER(i915) >= 13 &&
- crtc_state->uapi.async_flip &&
- plane->async_flip;
-}
-
-static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane)
{
@@ -4935,134 +4930,24 @@ use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
}
static u64
-skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int width, height;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- if (plane->id == PLANE_CURSOR)
- return 0;
-
- /*
- * We calculate extra ddb based on ratio plane rate/total data rate
- * in case, in some cases we should not allocate extra ddb for the plane,
- * so do not count its data rate, if this is the case.
- */
- if (use_min_ddb(crtc_state, plane))
- return 0;
-
- if (color_plane == 1 &&
- !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
- return 0;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
- height = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- /* UV plane does 1/2 pixel sub-sampling */
- if (color_plane == 1) {
- width /= 2;
- height /= 2;
- }
-
- return width * height * fb->format->cpp[color_plane];
-}
-
-static u64
-skl_get_total_relative_data_rate(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- u64 total_data_rate = 0;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
- int i;
-
- /* Calculate and cache data rate for each plane */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe)
- continue;
-
- plane_id = plane->id;
-
- /* packed/y */
- crtc_state->plane_data_rate[plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 0);
-
- /* uv-plane */
- crtc_state->uv_plane_data_rate[plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 1);
- }
+ u64 data_rate = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
- total_data_rate += crtc_state->plane_data_rate[plane_id];
- total_data_rate += crtc_state->uv_plane_data_rate[plane_id];
- }
-
- return total_data_rate;
-}
-
-static u64
-icl_get_total_relative_data_rate(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- u64 total_data_rate = 0;
- enum plane_id plane_id;
- int i;
-
- /* Calculate and cache data rate for each plane */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe)
+ if (plane_id == PLANE_CURSOR)
continue;
- plane_id = plane->id;
-
- if (!plane_state->planar_linked_plane) {
- crtc_state->plane_data_rate[plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 0);
- } else {
- enum plane_id y_plane_id;
-
- /*
- * The slave plane might not iterate in
- * intel_atomic_crtc_state_for_each_plane_state(),
- * and needs the master plane state which may be
- * NULL if we try get_new_plane_state(), so we
- * always calculate from the master.
- */
- if (plane_state->planar_slave)
- continue;
-
- /* Y plane rate is calculated on the slave */
- y_plane_id = plane_state->planar_linked_plane->id;
- crtc_state->plane_data_rate[y_plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 0);
+ data_rate += crtc_state->rel_data_rate[plane_id];
- crtc_state->plane_data_rate[plane_id] =
- skl_plane_relative_data_rate(crtc_state, plane_state, 1);
- }
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->rel_data_rate_y[plane_id];
}
- for_each_plane_id_on_crtc(crtc, plane_id)
- total_data_rate += crtc_state->plane_data_rate[plane_id];
-
- return total_data_rate;
+ return data_rate;
}
const struct skl_wm_level *
@@ -5103,18 +4988,18 @@ skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
* So this is actually safe to do.
*/
static void
-skl_check_wm_level(struct skl_wm_level *wm, u64 total)
+skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
{
- if (wm->min_ddb_alloc > total)
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
memset(wm, 0, sizeof(*wm));
}
static void
skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
- u64 total, u64 uv_total)
+ const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
{
- if (wm->min_ddb_alloc > total ||
- uv_wm->min_ddb_alloc > uv_total) {
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
+ uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
memset(wm, 0, sizeof(*wm));
memset(uv_wm, 0, sizeof(*uv_wm));
}
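These checks now compare a level's minimum allocation directly against the blocks the plane actually received, instead of against the cached iter.total[] arrays the patch removes. skl_ddb_entry_size() is simply the entry's extent, presumably (paraphrasing the existing intel_pm.h helper):

	/* Number of ddb blocks covered by an allocation entry. */
	static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
	{
		return entry->end - entry->start;
	}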
@@ -5134,17 +5019,16 @@ static bool icl_need_wm1_wa(struct drm_i915_private *i915,
struct skl_plane_ddb_iter {
u64 data_rate;
- u16 total[I915_MAX_PLANES];
- u16 uv_total[I915_MAX_PLANES];
u16 start, size;
};
-static u16
+static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ struct skl_ddb_entry *ddb,
const struct skl_wm_level *wm,
u64 data_rate)
{
- u16 extra = 0;
+ u16 size, extra = 0;
if (data_rate) {
extra = min_t(u16, iter->size,
@@ -5154,7 +5038,15 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
iter->data_rate -= data_rate;
}
- return wm->min_ddb_alloc + extra;
+ /*
+ * Keep the ddb entries of all disabled planes explicitly zeroed
+ * to avoid skl_ddb_add_affected_planes() adding those planes to
+ * the state when other planes change their allocations.
+ */
+ size = wm->min_ddb_alloc + extra;
+ if (size)
+ iter->start = skl_ddb_entry_init(ddb, iter->start,
+ iter->start + size);
}
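skl_allocate_plane_ddb() gives each plane its guaranteed wm->min_ddb_alloc plus a share of the spare blocks proportional to its relative data rate; because iter->size and iter->data_rate both shrink as planes are consumed, the shares always add up exactly. A worked example, assuming the split is DIV64_U64_ROUND_UP(iter->size * data_rate, iter->data_rate) as in the existing code:

	/* Illustrative numbers: 100 spare blocks, three planes with relative
	 * data rates 30, 50 and 40 (total 120):
	 *   plane 0: extra = ceil(100 * 30 / 120) = 25 -> size 75, rate 90
	 *   plane 1: extra = ceil( 75 * 50 /  90) = 42 -> size 33, rate 40
	 *   plane 2: extra = ceil( 33 * 40 /  40) = 33 -> size  0, rate  0
	 * 25 + 42 + 33 == 100, so no spare blocks are lost to rounding. */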
static int
@@ -5168,32 +5060,31 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
intel_atomic_get_new_dbuf_state(state);
const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
int num_active = hweight8(dbuf_state->active_pipes);
- struct skl_plane_ddb_iter iter = {};
+ struct skl_plane_ddb_iter iter;
enum plane_id plane_id;
+ u16 cursor_size;
u32 blocks;
int level;
/* Clear the partitioning for disabled planes. */
+ memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
- memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
if (!crtc_state->hw.active)
return 0;
- if (DISPLAY_VER(dev_priv) >= 11)
- iter.data_rate = icl_get_total_relative_data_rate(state, crtc);
- else
- iter.data_rate = skl_get_total_relative_data_rate(state, crtc);
-
+ iter.start = alloc->start;
iter.size = skl_ddb_entry_size(alloc);
if (iter.size == 0)
return 0;
/* Allocate fixed number of blocks for cursor. */
- iter.total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= iter.total[PLANE_CURSOR];
- skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR],
- alloc->end - iter.total[PLANE_CURSOR], alloc->end);
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
+
+ iter.data_rate = skl_total_relative_data_rate(crtc_state);
/*
* Find the highest watermark level for which we can satisfy the block
@@ -5206,7 +5097,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
- if (wm->wm[level].min_ddb_alloc > iter.total[PLANE_CURSOR]) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
drm_WARN_ON(&dev_priv->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
@@ -5243,47 +5137,29 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* proportional to its relative data rate.
*/
for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
- iter.total[plane_id] =
- skl_allocate_plane_ddb(&iter, &wm->wm[level],
- crtc_state->plane_data_rate[plane_id]);
-
- iter.uv_total[plane_id] =
- skl_allocate_plane_ddb(&iter, &wm->uv_wm[level],
- crtc_state->uv_plane_data_rate[plane_id]);
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
+ crtc_state->rel_data_rate_y[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ } else {
+ skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ }
}
drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
- /* Set the actual DDB start/end points for each plane */
- iter.start = alloc->start;
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *plane_alloc =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_ddb_entry *uv_plane_alloc =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- /* Gen11+ uses a separate plane for UV watermarks */
- drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 11 && iter.uv_total[plane_id]);
-
- /* Leave disabled planes at (0,0) */
- if (iter.total[plane_id])
- iter.start = skl_ddb_entry_init(plane_alloc, iter.start,
- iter.start + iter.total[plane_id]);
-
- if (iter.uv_total[plane_id])
- iter.start = skl_ddb_entry_init(uv_plane_alloc, iter.start,
- iter.start + iter.uv_total[plane_id]);
- }
-
/*
* When we calculated watermark values we didn't know how high
* of a level we'd actually be able to hit, so we just marked
@@ -5292,12 +5168,20 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
- iter.total[plane_id],
- iter.uv_total[plane_id]);
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id))
+ skl_check_nv12_wm_level(&wm->wm[level],
+ &wm->uv_wm[level],
+ ddb_y, ddb);
+ else
+ skl_check_wm_level(&wm->wm[level], ddb);
if (icl_need_wm1_wa(dev_priv, plane_id) &&
level == 1 && wm->wm[0].enable) {
@@ -5313,12 +5197,24 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* if it turns out we don't have enough DDB blocks for them.
*/
for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- skl_check_wm_level(&wm->trans_wm, iter.total[plane_id]);
- skl_check_wm_level(&wm->sagv.wm0, iter.total[plane_id]);
- skl_check_wm_level(&wm->sagv.trans_wm, iter.total[plane_id]);
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_check_wm_level(&wm->trans_wm, ddb_y);
+ } else {
+ WARN_ON(skl_ddb_entry_size(ddb_y));
+
+ skl_check_wm_level(&wm->trans_wm, ddb);
+ }
+
+ skl_check_wm_level(&wm->sagv.wm0, ddb);
+ skl_check_wm_level(&wm->sagv.trans_wm, ddb);
}
return 0;
@@ -5408,6 +5304,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
}
wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_4_TILED ||
modifier == I915_FORMAT_MOD_Yf_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
@@ -5578,6 +5475,25 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ /*
+ * Keep blocks at a minimum equivalent to plane_blocks_per_line,
+ * as there will always be at least one line in the lines-based
+ * configuration. This works around FIFO underruns observed with
+ * resolutions like 4k 60 Hz in single-channel DRAM configurations.
+ *
+ * Per Bspec 49325, if the ddb allocation can hold at least one
+ * plane_blocks_per_line, method2 should have been selected in the
+ * logic above. Assuming that modern platforms have enough dbuf and
+ * that method2 guarantees blocks equivalent to at least one line,
+ * select plane_blocks_per_line as the block count.
+ *
+ * TODO: Revisit this logic once we better understand the impact of
+ * DRAM channels on the level 0 memory latency and the relevant wm
+ * calculations.
+ */
+ if (skl_wm_has_lines(dev_priv, level))
+ blocks = max(blocks,
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
@@ -5926,7 +5842,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= PLANE_WM_EN;
if (level->ignore_lines)
val |= PLANE_WM_IGNORE_LINES;
- val |= level->blocks;
+ val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
intel_de_write_fw(dev_priv, reg, val);
@@ -5940,11 +5856,10 @@ void skl_write_plane_wm(struct intel_plane *plane,
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_ddb_entry *ddb_uv =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++)
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5954,25 +5869,20 @@ void skl_write_plane_wm(struct intel_plane *plane,
skl_plane_trans_wm(pipe_wm, plane_id));
if (HAS_HW_SAGV_WM(dev_priv)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
&wm->sagv.wm0);
skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
&wm->sagv.trans_wm);
}
- if (DISPLAY_VER(dev_priv) >= 11) {
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id), ddb_y);
- return;
- }
-
- if (wm->is_planar)
- swap(ddb_y, ddb_uv);
-
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id), ddb_y);
skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
+ PLANE_BUF_CFG(pipe, plane_id), ddb);
+
+ if (DISPLAY_VER(dev_priv) < 11)
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
}
void skl_write_cursor_wm(struct intel_plane *plane,
@@ -5984,7 +5894,7 @@ void skl_write_cursor_wm(struct intel_plane *plane,
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ &crtc_state->wm.skl.plane_ddb[plane_id];
for (level = 0; level <= max_level; level++)
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -6081,10 +5991,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
- if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
- &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
- skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
- &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
continue;
plane_state = intel_atomic_get_plane_state(state, plane);
@@ -6147,7 +6057,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (IS_ALDERLAKE_P(dev_priv))
+ if (HAS_MBUS_JOINING(dev_priv))
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
@@ -6186,8 +6096,8 @@ skl_compute_ddb(struct intel_atomic_state *state)
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
INTEL_INFO(dev_priv)->dbuf.slice_mask,
- yesno(old_dbuf_state->joined_mbus),
- yesno(new_dbuf_state->joined_mbus));
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus));
}
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -6253,8 +6163,8 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
- old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
- new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
+ old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
@@ -6574,7 +6484,7 @@ static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{
level->enable = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
- level->blocks = val & PLANE_WM_BLOCKS_MASK;
+ level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
}
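The switch from raw masking to REG_FIELD_PREP()/REG_FIELD_GET(), i915's compile-time-checked wrappers around FIELD_PREP()/FIELD_GET(), matters because the helpers shift values into and out of the mask's bit position instead of assuming the field starts at bit 0, as the old `val & PLANE_WM_BLOCKS_MASK` code did. A sketch with an invented two-field layout:

	#define DEMO_BLOCKS_MASK	REG_GENMASK(11, 0)	/* invented layout */
	#define DEMO_LINES_MASK		REG_GENMASK(18, 14)

	u32 val = REG_FIELD_PREP(DEMO_BLOCKS_MASK, 160) |
		  REG_FIELD_PREP(DEMO_LINES_MASK, 4);
	u32 blocks = REG_FIELD_GET(DEMO_BLOCKS_MASK, val);	/* 160 */
	u32 lines  = REG_FIELD_GET(DEMO_LINES_MASK, val);	/* 4 */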
@@ -6639,7 +6549,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc *crtc;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (HAS_MBUS_JOINING(dev_priv))
dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -6656,16 +6566,16 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_ddb_entry *ddb_uv =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
- plane_id, ddb_y, ddb_uv);
+ plane_id, ddb, ddb_y);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
- skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
}
dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
@@ -6689,7 +6599,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc->base.base.id, crtc->base.name,
dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
- yesno(dbuf_state->joined_mbus));
+ str_yes_no(dbuf_state->joined_mbus));
}
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
@@ -7000,7 +6910,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
"Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
- yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
+ str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
+ str_yes_no(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
@@ -7578,6 +7489,9 @@ static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
/* Wa_22011091694:adlp */
intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
+
+ /* Bspec/49189 Initialize Sequence */
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
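Both workaround writes above go through intel_de_rmw(), whose contract is read, clear the bits in the third argument, set the bits in the fourth, and write back only if the value changed. A toy model of that pattern (hypothetical mmio_read32/mmio_write32 accessors standing in for the real helpers):

	#include <stdint.h>

	/* hypothetical MMIO accessors, not the i915 API */
	uint32_t mmio_read32(uint32_t reg);
	void mmio_write32(uint32_t reg, uint32_t val);

	/* read-modify-write: clear the 'clear' bits, then set the 'set' bits */
	static uint32_t rmw32(uint32_t reg, uint32_t clear, uint32_t set)
	{
		uint32_t old = mmio_read32(reg);
		uint32_t val = (old & ~clear) | set;

		if (val != old)
			mmio_write32(reg, val);
		return old;
	}
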
@@ -8175,8 +8089,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
else if (GRAPHICS_VER(dev_priv) == 5)
ilk_get_mem_freq(dev_priv);
- if (intel_has_sagv(dev_priv))
- skl_setup_sagv_block_time(dev_priv);
+ intel_sagv_init(dev_priv);
/* For FIFO watermark updates */
if (DISPLAY_VER(dev_priv) >= 9) {
@@ -8301,7 +8214,7 @@ static void update_mbus_pre_enable(struct intel_atomic_state *state)
const struct intel_dbuf_state *dbuf_state =
intel_atomic_get_new_dbuf_state(state);
- if (!IS_ALDERLAKE_P(dev_priv))
+ if (!HAS_MBUS_JOINING(dev_priv))
return;
/*
@@ -8367,3 +8280,55 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(dev_priv,
new_dbuf_state->enabled_slices);
}
+
+void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc *crtc;
+ u32 val = 0;
+ int i;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
+ val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
+ }
+
+ /* Wa_22010947358:adl-p */
+ if (IS_ALDERLAKE_P(i915))
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
+ MBUS_DBOX_A_CREDIT(4);
+ else
+ val |= MBUS_DBOX_A_CREDIT(2);
+
+ if (IS_ALDERLAKE_P(i915)) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active ||
+ !intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), val);
+ }
+}
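The credit programming above collapses into three platform buckets; restating the decision tree as a standalone sketch makes the chosen values easy to audit (user-space restatement of the logic in intel_mbus_dbox_update(), raw credit counts rather than the register field macros):

	#include <stdbool.h>
	#include <stdio.h>

	struct credits { int a, b, bw; };

	/* mirrors the A/B/BW credit selection above */
	static struct credits pick_credits(int display_ver, bool adlp,
					   bool joined_mbus)
	{
		struct credits c;

		if (adlp) {            /* Wa_22010947358:adl-p */
			c.a = joined_mbus ? 6 : 4;
			c.b = 8;
			c.bw = 2;
		} else if (display_ver >= 12) {
			c.a = 2;
			c.b = 12;
			c.bw = 2;
		} else {               /* display version 11 */
			c.a = 2;
			c.b = 8;
			c.bw = 1;
		}
		return c;
	}

	int main(void)
	{
		struct credits c = pick_credits(13, true, true);

		printf("A=%d B=%d BW=%d\n", c.a, c.b, c.bw);  /* A=6 B=8 BW=2 */
		return 0;
	}
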
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 51705151b842..50604cf7398c 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -94,5 +94,6 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
int intel_dbuf_init(struct drm_i915_private *dev_priv);
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
+void intel_mbus_dbox_update(struct intel_atomic_state *state);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 737ef3f4ab54..62ff77445b01 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -12,6 +12,7 @@
#include "intel_region_ttm.h"
+#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
* DOC: TTM support structure
@@ -191,6 +192,7 @@ intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
*/
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+ resource_size_t offset,
resource_size_t size,
unsigned int flags)
{
@@ -202,7 +204,10 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
- if (mem->io_size && mem->io_size < mem->total) {
+ if (offset != I915_BO_INVALID_OFFSET) {
+ place.fpfn = offset >> PAGE_SHIFT;
+ place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
+ } else if (mem->io_size && mem->io_size < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
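The new offset handling pins the TTM placement to an exact page-frame range: fpfn/lpfn are page frame numbers, so the conversion is a pair of shifts. Worked numbers, assuming 4 KiB pages (the kernel uses its own PAGE_SHIFT):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

	int main(void)
	{
		uint64_t offset = 0x100000;   /* 1 MiB into the region */
		uint64_t size   = 0x40000;    /* 256 KiB object */
		uint64_t fpfn   = offset >> PAGE_SHIFT;          /* 256 */
		uint64_t lpfn   = fpfn + (size >> PAGE_SHIFT);   /* 320 */

		printf("place: fpfn=%llu lpfn=%llu\n",
		       (unsigned long long)fpfn, (unsigned long long)lpfn);
		return 0;
	}
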
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
index fdee5e7bd46c..cf9d86dcf409 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.h
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@ -36,6 +36,7 @@ struct ttm_device_funcs *i915_ttm_driver(void);
#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+ resource_size_t offset,
resource_size_t size,
unsigned int flags);
#endif
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 4fd69ecd1481..74e8e4680028 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -131,6 +131,10 @@ static const struct intel_step_info adls_rpls_revids[] = {
[0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
};
+static const struct intel_step_info adlp_n_revids[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_D0 },
+};
+
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -150,6 +154,9 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_XEHPSDV(i915)) {
revids = xehpsdv_revids;
size = ARRAY_SIZE(xehpsdv_revids);
+ } else if (IS_ADLP_N(i915)) {
+ revids = adlp_n_revids;
+ size = ARRAY_SIZE(adlp_n_revids);
} else if (IS_ALDERLAKE_P(i915)) {
revids = adlp_revids;
size = ARRAY_SIZE(adlp_revids);
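The stepping tables are sparse C arrays indexed directly by PCI revision ID, and the IS_ADLP_N() check must sit before the broader IS_ALDERLAKE_P() match so the derivative does not fall through to the ADL-P table. A toy model of the lookup (made-up step values, not the real enum):

	#include <stdio.h>

	struct step_info { int gt_step, display_step; };

	/* sparse table: index = PCI revid, unset entries stay zeroed */
	static const struct step_info example_revids[] = {
		[0x0] = { .gt_step = 1, .display_step = 4 },  /* toy A0/D0 */
	};

	int main(void)
	{
		int revid = 0x0;
		const struct step_info *si = &example_revids[revid];

		printf("gt_step=%d display_step=%d\n",
		       si->gt_step, si->display_step);
		return 0;
	}
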
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index dd8fdd5863de..83517a703eb6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1502,11 +1502,10 @@ ilk_dummy_write(struct intel_uncore *uncore)
static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
const i915_reg_t reg,
- const bool read,
- const bool before)
+ const bool read)
{
if (drm_WARN(&uncore->i915->drm,
- check_for_unclaimed_mmio(uncore) && !before,
+ check_for_unclaimed_mmio(uncore),
"Unclaimed %s register 0x%x\n",
read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
@@ -1514,6 +1513,18 @@ __unclaimed_reg_debug(struct intel_uncore *uncore,
uncore->i915->params.mmio_debug--;
}
+static void
+__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
+ const i915_reg_t reg,
+ const bool read)
+{
+ if (check_for_unclaimed_mmio(uncore))
+ drm_dbg(&uncore->i915->drm,
+ "Unclaimed access detected before %s register 0x%x\n",
+ read ? "read from" : "write to",
+ i915_mmio_reg_offset(reg));
+}
+
static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
const i915_reg_t reg,
@@ -1526,13 +1537,13 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
/* interrupts are disabled and re-enabled around uncore->lock usage */
lockdep_assert_held(&uncore->lock);
- if (before)
+ if (before) {
spin_lock(&uncore->debug->lock);
-
- __unclaimed_reg_debug(uncore, reg, read, before);
-
- if (!before)
+ __unclaimed_previous_reg_debug(uncore, reg, read);
+ } else {
+ __unclaimed_reg_debug(uncore, reg, read);
spin_unlock(&uncore->debug->lock);
+ }
}
#define __vgpu_read(x) \
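After this restructuring the two halves of the hook do different work: the "before" call takes the debug lock and only logs (drm_dbg, not a WARN, since a stale unclaimed access cannot be attributed to this register), while the "after" call WARNs about the access just made and drops the lock. The accessor macros bracket each MMIO operation roughly like this (simplified sketch; the real macros also handle forcewake):

	/* simplified shape of an MMIO read accessor using the hook above */
	static u32 example_read32(struct intel_uncore *uncore, i915_reg_t reg)
	{
		u32 val;

		unclaimed_reg_debug(uncore, reg, true, true);   /* lock, log stale */
		val = readl(uncore->regs + i915_mmio_reg_offset(reg));
		unclaimed_reg_debug(uncore, reg, true, false);  /* warn, unlock */

		return val;
	}
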
@@ -2039,14 +2050,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-int intel_uncore_setup_mmio(struct intel_uncore *uncore)
+int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{
struct drm_i915_private *i915 = uncore->i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- int mmio_bar;
int mmio_size;
- mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -2063,7 +2071,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore)
else
mmio_size = 2 * 1024 * 1024;
- uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
+ uncore->regs = ioremap(phys_addr, mmio_size);
if (uncore->regs == NULL) {
drm_err(&i915->drm, "failed to map registers\n");
return -EIO;
@@ -2074,9 +2082,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore)
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
{
- struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);
-
- pci_iounmap(pdev, uncore->regs);
+ iounmap(uncore->regs);
}
void intel_uncore_init_early(struct intel_uncore *uncore,
@@ -2464,17 +2470,46 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
return fw_domains;
}
-u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
- i915_reg_t reg,
- int slice, int subslice)
+/**
+ * uncore_rw_with_mcr_steering_fw - Access a register after programming
+ * the MCR selector register.
+ * @uncore: pointer to struct intel_uncore
+ * @reg: register being accessed
+ * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
+ * @slice: slice number (ignored for multi-cast write)
+ * @subslice: sub-slice number (ignored for multi-cast write)
+ * @value: register value to be written (ignored for read)
+ *
+ * Return: 0 for write access. register value for read access.
+ *
+ * Caller needs to make sure the relevant forcewake wells are up.
+ */
+static u32 uncore_rw_with_mcr_steering_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int slice, int subslice, u32 value)
{
- u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
+ u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
lockdep_assert_held(&uncore->lock);
if (GRAPHICS_VER(uncore->i915) >= 11) {
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
+
+ /*
+ * Wa_22013088509
+ *
+ * The setting of the multicast/unicast bit usually wouldn't
+ * matter for read operations (which always return the value
+ * from a single register instance regardless of how that bit
+ * is set), but some platforms have a workaround requiring us
+ * to remain in multicast mode for reads. There's no real
+ * downside to this, so we'll just go ahead and do so on all
+ * platforms; we'll only clear the multicast bit from the mask
+ * when explicitly doing a write operation.
+ */
+ if (rw_flag == FW_REG_WRITE)
+ mcr_mask |= GEN11_MCR_MULTICAST;
} else {
mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
@@ -2486,7 +2521,10 @@ u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
mcr |= mcr_ss;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- val = intel_uncore_read_fw(uncore, reg);
+ if (rw_flag == FW_REG_READ)
+ val = intel_uncore_read_fw(uncore, reg);
+ else
+ intel_uncore_write_fw(uncore, reg, value);
mcr &= ~mcr_mask;
mcr |= old_mcr & mcr_mask;
@@ -2496,14 +2534,16 @@ u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
return val;
}
-u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, int slice, int subslice)
+static u32 uncore_rw_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int slice, int subslice,
+ u32 value)
{
enum forcewake_domains fw_domains;
u32 val;
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
- FW_REG_READ);
+ rw_flag);
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
@@ -2511,7 +2551,8 @@ u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
- val = intel_uncore_read_with_mcr_steering_fw(uncore, reg, slice, subslice);
+ val = uncore_rw_with_mcr_steering_fw(uncore, reg, rw_flag,
+ slice, subslice, value);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&uncore->lock);
@@ -2519,6 +2560,28 @@ u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
return val;
}
+u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, int slice, int subslice)
+{
+ return uncore_rw_with_mcr_steering_fw(uncore, reg, FW_REG_READ,
+ slice, subslice, 0);
+}
+
+u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, int slice, int subslice)
+{
+ return uncore_rw_with_mcr_steering(uncore, reg, FW_REG_READ,
+ slice, subslice, 0);
+}
+
+void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 value,
+ int slice, int subslice)
+{
+ uncore_rw_with_mcr_steering(uncore, reg, FW_REG_WRITE,
+ slice, subslice, value);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 6ff56d673e2b..52fe3d89dd2b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -29,6 +29,7 @@
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/types.h>
#include "i915_reg_defs.h"
@@ -214,12 +215,14 @@ u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
int slice, int subslice);
u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
i915_reg_t reg, int slice, int subslice);
-
+void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 value,
+ int slice, int subslice);
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct intel_gt *gt);
-int intel_uncore_setup_mmio(struct intel_uncore *uncore);
+int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index 10e1e45471f1..c9da1015eb42 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -4,6 +4,8 @@
*/
#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
@@ -22,7 +24,7 @@ static int pxp_info_show(struct seq_file *m, void *data)
return 0;
}
- drm_printf(&p, "active: %s\n", yesno(intel_pxp_is_active(pxp)));
+ drm_printf(&p, "active: %s\n", str_yes_no(intel_pxp_is_active(pxp)));
drm_printf(&p, "instance counter: %u\n", pxp->key_instance);
return 0;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 598840b73dfa..92b00b4de240 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -3,8 +3,6 @@
* Copyright(c) 2020, Intel Corporation. All rights reserved.
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_pxp.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 2dac9be1de58..b61fe850e924 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -5,6 +5,7 @@
*/
#include <linux/kref.h>
+#include <linux/string_helpers.h>
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
@@ -280,7 +281,7 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
drm_printf(m, "\tpreallocated barriers? %s\n",
- yesno(!llist_empty(&ref->preallocated_barriers)));
+ str_yes_no(!llist_empty(&ref->preallocated_barriers)));
if (i915_active_acquire_if_busy(ref)) {
struct active_node *it, *n;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index ab751192eb3b..8633bec18fa7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1112,10 +1112,16 @@ static int misaligned_case(struct i915_address_space *vm, struct intel_memory_re
expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
expected_node_size = expected_vma_size;
- if (NEEDS_COMPACT_PT(vm->i915) && i915_gem_object_is_lmem(obj)) {
- /* compact-pt should expand lmem node to 2MB */
+ if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
+ /*
+ * The compact-pt should expand the lmem node to 2MB for the ppGTT;
+ * in all other cases we should only expect 64K.
+ */
expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
- expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+ if (NEEDS_COMPACT_PT(vm->i915) && !i915_is_ggtt(vm))
+ expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+ else
+ expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
}
if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
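Worked numbers for the expectation above: a 4 KiB lmem object on a 64K-pages platform should yield a 64 KiB VMA in every case, and a 2 MiB node only in the compact-pt ppGTT case (user-space restatement of the round_up() arithmetic):

	#include <stdint.h>
	#include <stdio.h>

	#define SZ_64K 0x10000u
	#define SZ_2M  0x200000u

	/* round v up to the next multiple of the power-of-two 'a' */
	static uint64_t round_up_u64(uint64_t v, uint64_t a)
	{
		return (v + a - 1) & ~(a - 1);
	}

	int main(void)
	{
		uint64_t size = 4096;

		printf("vma=%llu node(compact ppGTT)=%llu node(otherwise)=%llu\n",
		       (unsigned long long)round_up_u64(size, SZ_64K),  /* 65536 */
		       (unsigned long long)round_up_u64(size, SZ_2M),   /* 2097152 */
		       (unsigned long long)round_up_u64(size, SZ_64K)); /* 65536 */
		return 0;
	}
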
@@ -1150,7 +1156,7 @@ static int misaligned_pin(struct i915_address_space *vm,
flags |= PIN_GLOBAL;
for_each_memory_region(mr, vm->i915, id) {
- u64 min_alignment = i915_vm_min_alignment(vm, (enum intel_memory_type)id);
+ u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
u64 size = min_alignment;
u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
@@ -1205,7 +1211,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
goto out_free;
}
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
- GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
+ assert_vm_alive(&ppgtt->vm);
err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
@@ -1438,7 +1444,7 @@ static void track_vma_bind(struct i915_vma *vma)
vma->resource->bi.pages = vma->pages;
mutex_lock(&vma->vm->mutex);
- list_add_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
mutex_unlock(&vma->vm->mutex);
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ba32893e0873..73eb53edb8de 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -1043,13 +1043,21 @@ static int igt_lmem_write_cpu(void *arg)
}
i915_gem_object_lock(obj, NULL);
+
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ goto out_put;
+ }
+
/* Put the pages into a known state -- from the gpu for added fun */
intel_engine_pm_get(engine);
err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
obj->mm.pages->sgl, I915_CACHE_NONE,
true, 0xdeadbeaf, &rq);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_WRITE);
i915_request_put(rq);
}
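This hunk follows the 5.19 dma-resv rework visible elsewhere in this pull: fence slots must now be reserved while the object lock is held, and dma_resv_add_fence() attaches a fence with an explicit usage instead of filling the old exclusive slot. The pattern in isolation (sketch; assumes the caller already holds the resv lock):

	#include <linux/dma-resv.h>

	/* sketch of the reserve-then-add pattern used above */
	static int attach_write_fence(struct dma_resv *resv,
				      struct dma_fence *fence)
	{
		int err;

		/* one slot for one fence; must hold the resv lock */
		err = dma_resv_reserve_fences(resv, 1);
		if (err)
			return err;

		dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
		return 0;
	}
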
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 573d9b2e1a4a..9c31a16f8380 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -73,7 +73,7 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
intel_region_ttm_device_fini(i915);
- intel_gt_driver_late_release(to_gt(i915));
+ intel_gt_driver_late_release_all(i915);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -112,6 +112,11 @@ static struct dev_pm_domain pm_domain = {
},
};
+static void mock_gt_probe(struct drm_i915_private *i915)
+{
+ i915->gt[0] = &i915->gt0;
+}
+
struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
@@ -180,11 +185,11 @@ struct drm_i915_private *mock_gem_device(void)
spin_lock_init(&i915->gpu_error.lock);
i915_gem_init__mm(i915);
- intel_gt_init_early(to_gt(i915), i915);
- __intel_gt_init_early(to_gt(i915), i915);
+ intel_root_gt_init_early(i915);
mock_uncore_init(&i915->uncore, i915);
atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
to_gt(i915)->awake = -ENODEV;
+ mock_gt_probe(i915);
ret = intel_region_ttm_device_init(i915);
if (ret)
@@ -229,7 +234,7 @@ err_unlock:
err_drv:
intel_region_ttm_device_fini(i915);
err_ttm:
- intel_gt_driver_late_release(to_gt(i915));
+ intel_gt_driver_late_release_all(i915);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
mock_destroy_device(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index f64325491f35..670557ce1024 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -26,6 +26,7 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
int err;
obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
+ obj->bo_offset,
obj->base.size,
obj->flags);
if (IS_ERR(obj->mm.res))
@@ -57,6 +58,7 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
static int mock_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
@@ -70,6 +72,8 @@ static int mock_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);
+ obj->bo_offset = offset;
+
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index 1d9da32195c2..664fde244f59 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -3,6 +3,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/string_helpers.h>
#include <linux/kernel.h>
#include <drm/drm_print.h>
@@ -375,7 +376,7 @@ static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
if (vlv_wait_for_pw_status(dev_priv, mask, val))
drm_dbg(&dev_priv->drm,
"timeout waiting for GT wells to go %s\n",
- onoff(wait_for_on));
+ str_on_off(wait_for_on));
}
static void vlv_check_no_gt_access(struct drm_i915_private *i915)