Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 77
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 452
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_csr.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 162
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 64
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h | 41
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 1038
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 686
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c (renamed from drivers/gpu/drm/i915/display/intel_csr.c) | 418
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.h | 43
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 133
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 79
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 77
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 115
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 62
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 230
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 67
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 282
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_qp_tables.c | 309
-rw-r--r--  drivers/gpu/drm/i915/display/intel_qp_tables.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 175
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 184
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 59
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 217
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 79
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 1
-rw-r--r--  drivers/gpu/drm/i915/dma_resv_utils.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c | 345
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 34
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 79
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 36
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c | 98
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 177
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt_pm.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 33
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_sseu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 121
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 113
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 54
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 106
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 146
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 48
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 50
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 213
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 76
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 62
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 166
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 61
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_mocs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 51
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h | 106
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 67
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 532
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 233
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 101
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 124
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 122
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_active_types.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_buddy.c | 435
-rw-r--r--  drivers/gpu/drm/i915/i915_buddy.h | 131
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 156
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_globals.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_globals.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 98
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 140
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c | 117
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 59
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 55
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 62
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 268
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_scatterlist.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_scatterlist.h | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 62
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 62
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 205
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 445
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.c | 226
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.h | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_step.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_buddy.c | 789
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 20
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 14
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_perf.c | 9
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 12
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 16
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c | 220
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/librapl.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/librapl.h | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 13
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_region.c | 70
269 files changed, 8805 insertions, 5699 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 69f57ca9c68d..b63d374dff23 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -20,13 +20,13 @@ config DRM_I915
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
- select IO_MAPPING
select SYNC_FILE
select IOSF_MBI
select CRC32
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER
select VMAP_PFN
+ select DRM_TTM
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
@@ -102,7 +102,6 @@ config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
depends on 64BIT
- depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
default n
help
Choose this option if you want to enable Intel GVT-g graphics
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d0d936d9137b..4f22cac1c49b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -50,6 +50,7 @@ i915-y += i915_drv.o \
intel_memory_region.o \
intel_pch.o \
intel_pm.o \
+ intel_region_ttm.o \
intel_runtime_pm.o \
intel_sideband.o \
intel_step.o \
@@ -160,7 +161,6 @@ gem-y += \
i915-y += \
$(gem-y) \
i915_active.o \
- i915_buddy.o \
i915_cmd_parser.o \
i915_gem_evict.o \
i915_gem_gtt.o \
@@ -201,10 +201,10 @@ i915-y += \
display/intel_combo_phy.o \
display/intel_connector.o \
display/intel_crtc.o \
- display/intel_csr.o \
display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
+ display/intel_dmc.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
@@ -263,6 +263,7 @@ i915-y += \
display/intel_lvds.o \
display/intel_panel.o \
display/intel_pps.o \
+ display/intel_qp_tables.o \
display/intel_sdvo.o \
display/intel_tv.o \
display/intel_vdsc.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index dfe3cf328d13..de0f358184aa 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -8,6 +8,7 @@
#include "g4x_dp.h"
#include "intel_audio.h"
#include "intel_connector.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 78f93506ffaf..be352e9f0afc 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -8,6 +8,7 @@
#include "g4x_hdmi.h"
#include "intel_audio.h"
#include "intel_connector.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 456374ddf37a..9643c45a2209 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -10,6 +10,7 @@
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_sprite.h"
@@ -144,7 +145,7 @@ static bool i9xx_plane_has_windowing(struct intel_plane *plane)
return i9xx_plane == PLANE_B;
else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
return false;
- else if (IS_DISPLAY_VER(dev_priv, 4))
+ else if (DISPLAY_VER(dev_priv) == 4)
return i9xx_plane == PLANE_C;
else
return i9xx_plane == PLANE_B ||
@@ -1039,4 +1040,3 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->fb = intel_fb;
}
-
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 9282978060b0..16812488c5dd 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -31,7 +31,9 @@
#include "intel_atomic.h"
#include "intel_combo_phy.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
#include "intel_ddi.h"
+#include "intel_de.h"
#include "intel_dsi.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
@@ -361,10 +363,19 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
int afe_clk_khz;
- u32 esc_clk_div_m;
+ int theo_word_clk, act_word_clk;
+ u32 esc_clk_div_m, esc_clk_div_m_phy;
afe_clk_khz = afe_clk(encoder, crtc_state);
- esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ theo_word_clk = DIV_ROUND_UP(afe_clk_khz, 8 * DSI_MAX_ESC_CLK);
+ act_word_clk = max(3, theo_word_clk + (theo_word_clk + 1) % 2);
+ esc_clk_div_m = act_word_clk * 8;
+ esc_clk_div_m_phy = (act_word_clk - 1) / 2;
+ } else {
+ esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+ }
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
@@ -377,6 +388,14 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
}
+
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8),
+ esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY);
+ intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8));
+ }
+ }
}
static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
@@ -592,7 +611,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
* a value '0' inside TA_PARAM_REGISTERS otherwise
* leave all fields at HW default values.
*/
- if (IS_DISPLAY_VER(dev_priv, 11)) {
+ if (DISPLAY_VER(dev_priv) == 11) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
tmp = intel_de_read(dev_priv,
@@ -1158,7 +1177,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_configure_transcoder(encoder, crtc_state);
/* Step 4l: Gate DDI clocks */
- if (IS_DISPLAY_VER(dev_priv, 11))
+ if (DISPLAY_VER(dev_priv) == 11)
gen11_dsi_gate_clocks(encoder);
}
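
The new Alderlake-S/P branch above rounds the escape-clock word divider up to the next odd value, with a floor of 3, and derives the PHY-side divider from the same result. A minimal worked sketch of that arithmetic, assuming DSI_MAX_ESC_CLK is the 20000 kHz limit defined in icl_dsi.c:

/* Worked example of the ADL escape-clock divider math above; the
 * DSI_MAX_ESC_CLK value of 20000 kHz is an assumption taken from
 * icl_dsi.c, and the AFE clock is just an illustrative input. */
#include <stdio.h>

#define DSI_MAX_ESC_CLK 20000 /* kHz */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int afe_clk_khz = 500000; /* example AFE clock */
        int theo = DIV_ROUND_UP(afe_clk_khz, 8 * DSI_MAX_ESC_CLK); /* 4 */
        int act = theo + (theo + 1) % 2; /* round up to odd: 5 */

        if (act < 3)
                act = 3; /* same floor as max(3, ...) */

        /* esc_clk_div_m = 40, so esc clock = 500000 / 40 = 12500 kHz,
         * below the 20000 kHz limit; PHY divider = (5 - 1) / 2 = 2 */
        printf("esc_clk_div_m=%d esc_clk_div_m_phy=%d\n",
               act * 8, (act - 1) / 2);
        return 0;
}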
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 4fa389fce8cb..b4e7ac51aa31 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -109,16 +109,6 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return -EINVAL;
}
-static bool blob_equal(const struct drm_property_blob *a,
- const struct drm_property_blob *b)
-{
- if (a && b)
- return a->length == b->length &&
- !memcmp(a->data, b->data, a->length);
-
- return !a == !b;
-}
-
int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state)
{
@@ -149,8 +139,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
- !blob_equal(new_conn_state->base.hdr_output_metadata,
- old_conn_state->base.hdr_output_metadata))
+ !drm_connector_atomic_hdr_metadata_equal(old_state, new_state))
crtc_state->mode_changed = true;
return 0;
@@ -198,6 +187,26 @@ intel_connector_needs_modeset(struct intel_atomic_state *state,
new_conn_state->crtc)));
}
+/**
+ * intel_any_crtc_needs_modeset - check if any CRTC needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ *
+ * Returns true if any CRTC in @state needs a modeset.
+ */
+bool intel_any_crtc_needs_modeset(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (intel_crtc_needs_modeset(crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
struct intel_digital_connector_state *
intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
struct intel_connector *connector)
@@ -332,7 +341,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
plane_state->hw.fb->format->is_yuv &&
plane_state->hw.fb->format->num_planes > 1) {
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- if (IS_DISPLAY_VER(dev_priv, 9)) {
+ if (DISPLAY_VER(dev_priv) == 9) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
/*
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 62a3365ed5e6..d2700c74c9da 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -35,6 +35,7 @@ struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector);
bool intel_connector_needs_modeset(struct intel_atomic_state *state,
struct drm_connector *connector);
+bool intel_any_crtc_needs_modeset(struct intel_atomic_state *state);
struct intel_digital_connector_state *
intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index c3f2962aa1eb..36f52a1d7552 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -102,7 +102,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
- intel_state->vma = NULL;
+ intel_state->ggtt_vma = NULL;
+ intel_state->dpt_vma = NULL;
intel_state->flags = 0;
/* add reference to fb */
@@ -125,7 +126,9 @@ intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct intel_plane_state *plane_state = to_intel_plane_state(state);
- drm_WARN_ON(plane->dev, plane_state->vma);
+
+ drm_WARN_ON(plane->dev, plane_state->ggtt_vma);
+ drm_WARN_ON(plane->dev, plane_state->dpt_vma);
__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
if (plane_state->hw.fb)
@@ -133,25 +136,45 @@ intel_plane_destroy_state(struct drm_plane *plane,
kfree(plane_state);
}
-unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+unsigned int intel_adjusted_rate(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ unsigned int rate)
{
unsigned int src_w, src_h, dst_w, dst_h;
- unsigned int pixel_rate = crtc_state->pixel_rate;
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- dst_w = drm_rect_width(&plane_state->uapi.dst);
- dst_h = drm_rect_height(&plane_state->uapi.dst);
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
+ dst_w = drm_rect_width(dst);
+ dst_h = drm_rect_height(dst);
/* Downscaling limits the maximum pixel rate */
dst_w = min(src_w, dst_w);
dst_h = min(src_h, dst_h);
- return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h),
+ return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h),
dst_w * dst_h);
}
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ /*
+ * Note we don't check for plane visibility here as
+ * we want to use this when calculating the cursor
+ * watermarks even if the cursor is fully offscreen.
+ * That depends on the src/dst rectangles being
+ * correctly populated whenever the watermark code
+ * considers the cursor to be visible, whether or not
+ * it is actually visible.
+ *
+ * See: intel_wm_plane_visible() and intel_check_cursor()
+ */
+
+ return intel_adjusted_rate(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ crtc_state->pixel_rate);
+}
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
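
intel_adjusted_rate() preserves the old semantics: the rate scales up with the downscaling factor and upscaling leaves it unchanged, because dst is clamped to src first. A self-contained sketch of the same formula, using plain integers in place of the driver's drm_rect and .16 fixed-point source coordinates:

#include <stdio.h>
#include <stdint.h>

/* rate * src_area / dst_area, rounded up, with dst clamped to src */
static unsigned int adjusted_rate(unsigned int src_w, unsigned int src_h,
                                  unsigned int dst_w, unsigned int dst_h,
                                  unsigned int rate)
{
        uint64_t num, den;

        if (dst_w > src_w)
                dst_w = src_w;
        if (dst_h > src_h)
                dst_h = src_h;

        num = (uint64_t)rate * src_w * src_h;
        den = (uint64_t)dst_w * dst_h;
        return (num + den - 1) / den;
}

int main(void)
{
        /* a 4k plane downscaled to 1080p costs 4x the pixel rate */
        printf("%u\n", adjusted_rate(3840, 2160, 1920, 1080, 148500));
        return 0;
}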
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 5c78a087ed86..dc4d05e75e1c 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -10,6 +10,7 @@
struct drm_plane;
struct drm_property;
+struct drm_rect;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -18,6 +19,9 @@ struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+unsigned int intel_adjusted_rate(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ unsigned int rate);
unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 9671c8f6e892..5f4f316b3ab5 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -31,6 +31,7 @@
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_cdclk.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
@@ -591,40 +592,33 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
val = intel_de_read(i915, AUD_CONFIG_BE);
- if (IS_DISPLAY_VER(i915, 11))
+ if (DISPLAY_VER(i915) == 11)
val |= HBLANK_EARLY_ENABLE_ICL(pipe);
else if (DISPLAY_VER(i915) >= 12)
val |= HBLANK_EARLY_ENABLE_TGL(pipe);
if (crtc_state->dsc.compression_enable &&
- (crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
- crtc_state->hw.adjusted_mode.vdisplay >= 2160)) {
+ crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
+ crtc_state->hw.adjusted_mode.vdisplay >= 2160) {
/* Get hblank early enable value required */
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
- if (hblank_early_prog < 32) {
- val &= ~HBLANK_START_COUNT_MASK(pipe);
+ if (hblank_early_prog < 32)
val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
- } else if (hblank_early_prog < 64) {
- val &= ~HBLANK_START_COUNT_MASK(pipe);
+ else if (hblank_early_prog < 64)
val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
- } else if (hblank_early_prog < 96) {
- val &= ~HBLANK_START_COUNT_MASK(pipe);
+ else if (hblank_early_prog < 96)
val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
- } else {
- val &= ~HBLANK_START_COUNT_MASK(pipe);
+ else
val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
- }
/* Get samples room value required */
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
samples_room = calc_samples_room(crtc_state);
- if (samples_room < 3) {
- val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ if (samples_room < 3)
val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
- } else {
- /* Program 0 i.e "All Samples available in buffer" */
- val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ else /* Program 0 i.e "All Samples available in buffer" */
val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
- }
}
intel_de_write(i915, AUD_CONFIG_BE, val);
@@ -1309,7 +1303,7 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 9) {
aud_freq_init = intel_de_read(dev_priv, AUD_FREQ_CNTRL);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
aud_freq = AUD_FREQ_GEN12;
else
aud_freq = aud_freq_init;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3d0c035b5e38..5b6922e28ef2 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -610,7 +610,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
* Only parse SDVO mappings on gens that could have SDVO. This isn't
* accurate and doesn't have to be, as long as it's not too strict.
*/
- if (!IS_DISPLAY_RANGE(i915, 3, 7)) {
+ if (!IS_DISPLAY_VER(i915, 3, 7)) {
drm_dbg_kms(&i915->drm, "Skipping SDVO device mapping\n");
return;
}
@@ -917,7 +917,7 @@ parse_psr(struct drm_i915_private *i915, const struct bdb_header *bdb)
* Old decimal value is wake up time in multiples of 100 us.
*/
if (bdb->version >= 205 &&
- (IS_GEN9_BC(i915) || DISPLAY_VER(i915) >= 10)) {
+ (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
i915->vbt.psr.tp1_wakeup_time_us = 500;
@@ -1651,7 +1651,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
const u8 *ddc_pin_map;
int n_entries;
- if (HAS_PCH_ADP(i915)) {
+ if (IS_ALDERLAKE_S(i915)) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
@@ -1659,7 +1659,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
} else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && IS_GEN9_BC(i915)) {
+ } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
@@ -1743,8 +1743,24 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
[PORT_TC3] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
[PORT_TC4] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 },
};
+ static const int xelpd_port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_D_XELPD] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ [PORT_E_XELPD] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 },
+ [PORT_TC1] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 },
+ [PORT_TC2] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 },
+ [PORT_TC3] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 },
+ [PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
+ };
- if (IS_ALDERLAKE_S(i915))
+ if (DISPLAY_VER(i915) == 13)
+ return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping),
+ ARRAY_SIZE(xelpd_port_mapping[0]),
+ xelpd_port_mapping,
+ dvo_port);
+ else if (IS_ALDERLAKE_S(i915))
return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping),
ARRAY_SIZE(adls_port_mapping[0]),
adls_port_mapping,
@@ -1852,6 +1868,19 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
}
+static bool is_port_valid(struct drm_i915_private *i915, enum port port)
+{
+ /*
+ * On some ICL/CNL SKUs port F is not present, but broken VBTs mark
+ * the port as present. Only try to initialize port F for the
+ * SKUs that may actually have it.
+ */
+ if (port == PORT_F && (IS_ICELAKE(i915) || IS_CANNONLAKE(i915)))
+ return IS_ICL_WITH_PORT_F(i915) || IS_CNL_WITH_PORT_F(i915);
+
+ return true;
+}
+
static void parse_ddi_port(struct drm_i915_private *i915,
struct intel_bios_encoder_data *devdata)
{
@@ -1865,6 +1894,13 @@ static void parse_ddi_port(struct drm_i915_private *i915,
if (port == PORT_NONE)
return;
+ if (!is_port_valid(i915, port)) {
+ drm_dbg_kms(&i915->drm,
+ "VBT reports port %c as supported, but that can't be true: skipping\n",
+ port_name(port));
+ return;
+ }
+
info = &i915->vbt.ddi_port_info[port];
if (info->devdata) {
@@ -2770,7 +2806,8 @@ intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata =
i915->vbt.ddi_port_info[port].devdata;
- if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEN9_LP(i915)))
+ if (drm_WARN_ON_ONCE(&i915->drm,
+ !IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
return false;
return devdata && devdata->child.hpd_invert;
@@ -2852,7 +2889,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
- if (IS_ALDERLAKE_S(i915))
+ if (DISPLAY_VER(i915) == 13)
+ aux_ch = AUX_CH_D_XELPD;
+ else if (IS_ALDERLAKE_S(i915))
aux_ch = AUX_CH_USBC3;
else if (IS_DG1(i915) || IS_ROCKETLAKE(i915))
aux_ch = AUX_CH_USBC2;
@@ -2860,22 +2899,36 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_D;
break;
case DP_AUX_E:
- if (IS_ALDERLAKE_S(i915))
+ if (DISPLAY_VER(i915) == 13)
+ aux_ch = AUX_CH_E_XELPD;
+ else if (IS_ALDERLAKE_S(i915))
aux_ch = AUX_CH_USBC4;
else
aux_ch = AUX_CH_E;
break;
case DP_AUX_F:
- aux_ch = AUX_CH_F;
+ if (DISPLAY_VER(i915) == 13)
+ aux_ch = AUX_CH_USBC1;
+ else
+ aux_ch = AUX_CH_F;
break;
case DP_AUX_G:
- aux_ch = AUX_CH_G;
+ if (DISPLAY_VER(i915) == 13)
+ aux_ch = AUX_CH_USBC2;
+ else
+ aux_ch = AUX_CH_G;
break;
case DP_AUX_H:
- aux_ch = AUX_CH_H;
+ if (DISPLAY_VER(i915) == 13)
+ aux_ch = AUX_CH_USBC3;
+ else
+ aux_ch = AUX_CH_H;
break;
case DP_AUX_I:
- aux_ch = AUX_CH_I;
+ if (DISPLAY_VER(i915) == 13)
+ aux_ch = AUX_CH_USBC4;
+ else
+ aux_ch = AUX_CH_I;
break;
default:
MISSING_CASE(info->alternate_aux_channel);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 584ab5ce4106..bfb398f0432e 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -77,7 +77,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;
- if (IS_DISPLAY_VER(dev_priv, 12))
+ if (DISPLAY_VER(dev_priv) == 12)
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
@@ -89,7 +89,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->t_bl = 16;
break;
}
- else if (IS_DISPLAY_VER(dev_priv, 11))
+ else if (DISPLAY_VER(dev_priv) == 11)
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
if (drm_WARN_ON(&dev_priv->drm,
@@ -162,7 +162,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
{
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = dev_priv->dram_info.num_channels;
+ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
int deinterleave;
int ipqdepth, ipqdepthpch;
int dclk_max;
@@ -267,13 +267,13 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ALDERLAKE_S(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
icl_get_bw_info(dev_priv, &adls_sa_info);
else if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
- else if (IS_DISPLAY_VER(dev_priv, 12))
+ else if (DISPLAY_VER(dev_priv) == 12)
icl_get_bw_info(dev_priv, &tgl_sa_info);
- else if (IS_DISPLAY_VER(dev_priv, 11))
+ else if (DISPLAY_VER(dev_priv) == 11)
icl_get_bw_info(dev_priv, &icl_sa_info);
}
@@ -344,6 +344,9 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
data_rate += bw_state->data_rate[pipe];
+ if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active())
+ data_rate = data_rate * 105 / 100;
+
return data_rate;
}
@@ -390,7 +393,6 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int max_bw = 0;
- int slice_id;
enum pipe pipe;
int i;
@@ -418,6 +420,7 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
&crtc_state->wm.skl.plane_ddb_uv[plane_id];
unsigned int data_rate = crtc_state->data_rate[plane_id];
unsigned int dbuf_mask = 0;
+ enum dbuf_slice slice;
dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);
@@ -435,8 +438,8 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
* pessimistic, which shouldn't pose any significant
* problem anyway.
*/
- for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
- crtc_bw->used_bw[slice_id] += data_rate;
+ for_each_dbuf_slice_in_mask(dev_priv, slice, dbuf_mask)
+ crtc_bw->used_bw[slice] += data_rate;
}
}
@@ -445,10 +448,11 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
for_each_pipe(dev_priv, pipe) {
struct intel_dbuf_bw *crtc_bw;
+ enum dbuf_slice slice;
crtc_bw = &new_bw_state->dbuf_bw[pipe];
- for_each_dbuf_slice(slice_id) {
+ for_each_dbuf_slice(dev_priv, slice) {
/*
* Current experimental observations show that contrary
* to BSpec we get underruns once we exceed 64 * CDCLK
@@ -457,7 +461,7 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
* bumped up all the time we calculate CDCLK according
* to this formula for overall bw consumed by slices.
*/
- max_bw += crtc_bw->used_bw[slice_id];
+ max_bw += crtc_bw->used_bw[slice];
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 3f43ad4d7362..613ffcc68eba 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -26,7 +26,9 @@
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
+#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_psr.h"
#include "intel_sideband.h"
/**
@@ -723,12 +725,28 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
bdw_calc_voltage_level(cdclk_config->cdclk);
}
+static u32 bdw_cdclk_freq_sel(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ fallthrough;
+ case 337500:
+ return LCPLL_CLK_FREQ_337_5_BDW;
+ case 450000:
+ return LCPLL_CLK_FREQ_450;
+ case 540000:
+ return LCPLL_CLK_FREQ_54O_BDW;
+ case 675000:
+ return LCPLL_CLK_FREQ_675_BDW;
+ }
+}
+
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
- u32 val;
int ret;
if (drm_WARN(&dev_priv->drm,
@@ -748,9 +766,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val |= LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL,
+ 0, LCPLL_CD_SOURCE_FCLK);
/*
* According to the spec, it should be enough to poll for this 1 us.
@@ -760,32 +777,11 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
LCPLL_CD_SOURCE_FCLK_DONE, 100))
drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val &= ~LCPLL_CLK_FREQ_MASK;
-
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- fallthrough;
- case 337500:
- val |= LCPLL_CLK_FREQ_337_5_BDW;
- break;
- case 450000:
- val |= LCPLL_CLK_FREQ_450;
- break;
- case 540000:
- val |= LCPLL_CLK_FREQ_54O_BDW;
- break;
- case 675000:
- val |= LCPLL_CLK_FREQ_675_BDW;
- break;
- }
-
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL,
+ LCPLL_CLK_FREQ_MASK, bdw_cdclk_freq_sel(cdclk));
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK, 0);
if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
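
Many hunks in this file replace open-coded read/modify/write sequences with intel_de_rmw(). Roughly, the helper behaves like the sketch below; the real inline lives in intel_de.h (extended in this series) and forwards to intel_uncore_rmw(), returning the old register value:

/* Sketch of the pattern behind intel_de_rmw(): read once, drop the
 * 'clear' bits, OR in the 'set' bits, write back the result. */
static u32 de_rmw_sketch(struct drm_i915_private *i915,
                         i915_reg_t reg, u32 clear, u32 set)
{
        u32 old, val;

        old = intel_de_read(i915, reg);
        val = (old & ~clear) | set;
        intel_de_write(i915, reg, val);

        return old;
}

So intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_CD_SOURCE_FCLK) sets a bit, and intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0) clears it again.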
@@ -954,10 +950,8 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
intel_update_max_cdclk(dev_priv);
}
-static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
{
- u32 val;
-
drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
@@ -969,23 +963,24 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
* rate later on, with the constraint of choosing a frequency that
* works with vco.
*/
- val = intel_de_read(dev_priv, DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
- DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
- val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
if (vco == 8640000)
- val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
- SKL_DPLL0);
+ return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0);
else
- val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
- SKL_DPLL0);
+ return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0);
+}
- intel_de_write(dev_priv, DPLL_CTRL1, val);
+static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ intel_de_rmw(dev_priv, DPLL_CTRL1,
+ DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0),
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0) |
+ skl_dpll0_link_rate(dev_priv, vco));
intel_de_posting_read(dev_priv, DPLL_CTRL1);
- intel_de_write(dev_priv, LCPLL1_CTL,
- intel_de_read(dev_priv, LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, LCPLL1_CTL,
+ 0, LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
drm_err(&dev_priv->drm, "DPLL0 not locked\n");
@@ -998,14 +993,38 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
- intel_de_write(dev_priv, LCPLL1_CTL,
- intel_de_read(dev_priv, LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, LCPLL1_CTL,
+ LCPLL_PLL_ENABLE, 0);
+
if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
}
+static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
+ int cdclk, int vco)
+{
+ switch (cdclk) {
+ default:
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
+ fallthrough;
+ case 308571:
+ case 337500:
+ return CDCLK_FREQ_337_308;
+ case 450000:
+ case 432000:
+ return CDCLK_FREQ_450_432;
+ case 540000:
+ return CDCLK_FREQ_540;
+ case 617143:
+ case 675000:
+ return CDCLK_FREQ_675_617;
+ }
+}
+
static void skl_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
@@ -1036,29 +1055,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- /* Choose frequency for this cdclk */
- switch (cdclk) {
- default:
- drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->cdclk.hw.bypass);
- drm_WARN_ON(&dev_priv->drm, vco != 0);
- fallthrough;
- case 308571:
- case 337500:
- freq_select = CDCLK_FREQ_337_308;
- break;
- case 450000:
- case 432000:
- freq_select = CDCLK_FREQ_450_432;
- break;
- case 540000:
- freq_select = CDCLK_FREQ_540;
- break;
- case 617143:
- case 675000:
- freq_select = CDCLK_FREQ_675_617;
- break;
- }
+ freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco);
if (dev_priv->cdclk.hw.vco != 0 &&
dev_priv->cdclk.hw.vco != vco)
@@ -1257,6 +1254,42 @@ static const struct intel_cdclk_vals rkl_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static const struct intel_cdclk_vals adlp_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
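Every row in these tables satisfies cdclk = refclk * ratio / divider, with the PLL VCO running at refclk * ratio. Sanity-checking two ADL-P entries: 38400 kHz * 34 = 1305600 kHz VCO, divided by 2, gives the listed 652800 kHz; 19200 kHz * 27 = 518400 kHz VCO, divided by 3, gives 172800 kHz.
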
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
@@ -1432,18 +1465,12 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- drm_WARN(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 10,
- "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- drm_WARN(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv),
- "Unsupported divider\n");
div = 8;
break;
default:
@@ -1477,12 +1504,9 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
- u32 val;
- val = intel_de_read(dev_priv, BXT_DE_PLL_CTL);
- val &= ~BXT_DE_PLL_RATIO_MASK;
- val |= BXT_DE_PLL_RATIO(ratio);
- intel_de_write(dev_priv, BXT_DE_PLL_CTL, val);
+ intel_de_rmw(dev_priv, BXT_DE_PLL_CTL,
+ BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio));
intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
@@ -1496,16 +1520,12 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
- u32 val;
-
- val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
- val &= ~BXT_DE_PLL_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_PLL_ENABLE, 0);
/* Timeout 200us */
- if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
- drm_err(&dev_priv->drm,
- "timeout waiting for CDCLK PLL unlock\n");
+ if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
}
@@ -1522,9 +1542,37 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
- drm_err(&dev_priv->drm,
- "timeout waiting for CDCLK PLL lock\n");
+ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n");
+
+ dev_priv->cdclk.hw.vco = vco;
+}
+
+static bool has_cdclk_crawl(struct drm_i915_private *i915)
+{
+ return INTEL_INFO(i915)->has_cdclk_crawl;
+}
+
+static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ u32 val;
+
+ /* Write PLL ratio without disabling */
+ val = CNL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ /* Submit freq change request */
+ val |= BXT_DE_PLL_FREQ_REQ;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ /* Timeout 200us */
+ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
+ DRM_ERROR("timeout waiting for FREQ change request ack\n");
+
+ val &= ~BXT_DE_PLL_FREQ_REQ;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
dev_priv->cdclk.hw.vco = vco;
}
@@ -1549,13 +1597,34 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
}
}
+static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
+ int cdclk, int vco)
+{
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
+ switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+ default:
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
+ fallthrough;
+ case 2:
+ return BXT_CDCLK_CD2X_DIV_SEL_1;
+ case 3:
+ return BXT_CDCLK_CD2X_DIV_SEL_1_5;
+ case 4:
+ return BXT_CDCLK_CD2X_DIV_SEL_2;
+ case 8:
+ return BXT_CDCLK_CD2X_DIV_SEL_4;
+ }
+}
+
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- u32 val, divider;
+ u32 val;
int ret;
/* Inform power controller of upcoming frequency change. */
@@ -1580,41 +1649,16 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- /* cdclk = vco / 2 / div{1,1.5,2,4} */
- switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- default:
- drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->cdclk.hw.bypass);
- drm_WARN_ON(&dev_priv->drm, vco != 0);
- fallthrough;
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
- case 3:
- drm_WARN(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 10,
- "Unsupported divider\n");
- divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- break;
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 8:
- drm_WARN(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv),
- "Unsupported divider\n");
- divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- }
-
- if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
+ if (has_cdclk_crawl(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
+ if (dev_priv->cdclk.hw.vco != vco)
+ adlp_cdclk_pll_crawl(dev_priv, vco);
+ } else if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
if (dev_priv->cdclk.hw.vco != 0 &&
dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_disable(dev_priv);
if (dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_enable(dev_priv, vco);
-
} else {
if (dev_priv->cdclk.hw.vco != 0 &&
dev_priv->cdclk.hw.vco != vco)
@@ -1624,14 +1668,16 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
bxt_de_pll_enable(dev_priv, vco);
}
- val = divider | skl_cdclk_decimal(cdclk) |
- bxt_cdclk_cd2x_pipe(dev_priv, pipe);
+ val = bxt_cdclk_cd2x_div_sel(dev_priv, cdclk, vco) |
+ bxt_cdclk_cd2x_pipe(dev_priv, pipe) |
+ skl_cdclk_decimal(cdclk);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
- if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
intel_de_write(dev_priv, CDCLK_CTL, val);
@@ -1710,29 +1756,16 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
expected = skl_cdclk_decimal(cdclk);
/* Figure out what CD2X divider we should be using for this cdclk */
- switch (DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.vco,
- dev_priv->cdclk.hw.cdclk)) {
- case 2:
- expected |= BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
- case 3:
- expected |= BXT_CDCLK_CD2X_DIV_SEL_1_5;
- break;
- case 4:
- expected |= BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 8:
- expected |= BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- default:
- goto sanitize;
- }
+ expected |= bxt_cdclk_cd2x_div_sel(dev_priv,
+ dev_priv->cdclk.hw.cdclk,
+ dev_priv->cdclk.hw.vco);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
- if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000)
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ dev_priv->cdclk.hw.cdclk >= 500000)
expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (cdctl == expected)
@@ -1797,9 +1830,9 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
*/
void intel_cdclk_init_hw(struct drm_i915_private *i915)
{
- if (IS_GEN9_LP(i915) || DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
bxt_cdclk_init_hw(i915);
- else if (IS_GEN9_BC(i915))
+ else if (DISPLAY_VER(i915) == 9)
skl_cdclk_init_hw(i915);
}
@@ -1812,12 +1845,34 @@ void intel_cdclk_init_hw(struct drm_i915_private *i915)
*/
void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 10 || IS_GEN9_LP(i915))
+ if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
bxt_cdclk_uninit_hw(i915);
- else if (IS_GEN9_BC(i915))
+ else if (DISPLAY_VER(i915) == 9)
skl_cdclk_uninit_hw(i915);
}
+static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ int a_div, b_div;
+
+ if (!has_cdclk_crawl(dev_priv))
+ return false;
+
+ /*
+ * The vco and cd2x divider will change independently
+ * from each other, so we disallow cd2x change when crawling.
+ */
+ a_div = DIV_ROUND_CLOSEST(a->vco, a->cdclk);
+ b_div = DIV_ROUND_CLOSEST(b->vco, b->cdclk);
+
+ return a->vco != 0 && b->vco != 0 &&
+ a->vco != b->vco &&
+ a_div == b_div &&
+ a->ref == b->ref;
+}
+
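Read together with the ADL-P table above: at a 19200 kHz refclk, a 307200 -> 652800 kHz change can crawl, since both points use the /2 CD2X divider (VCOs 614400 and 1305600 kHz) and only the PLL ratio moves (32 -> 68). A 172800 -> 307200 kHz change cannot, since 172800 uses a /3 divider, so it falls through to the cd2x-update or full-modeset paths handled later in intel_modeset_calc_cdclk().
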
/**
* intel_cdclk_needs_modeset - Determine if changing between the CDCLK
* configurations requires a modeset on all pipes
@@ -1852,7 +1907,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *b)
{
/* Older hw doesn't have the capability */
- if (DISPLAY_VER(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv))
return false;
return a->cdclk != b->cdclk &&
@@ -1907,6 +1962,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_pause(intel_dp);
+ }
+
/*
* Lock aux/gmbus while we change cdclk in case those
* functions use cdclk. Not all platforms/ports do,
@@ -1929,6 +1990,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
}
mutex_unlock(&dev_priv->gmbus_mutex);
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_resume(intel_dp);
+ }
+
if (drm_WARN(&dev_priv->drm,
intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {
@@ -2002,7 +2069,7 @@ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 10)
return DIV_ROUND_UP(pixel_rate, 2);
- else if (IS_DISPLAY_VER(dev_priv, 9) ||
+ else if (DISPLAY_VER(dev_priv) == 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
@@ -2050,10 +2117,10 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
crtc_state->lane_count == 4) {
- if (IS_DISPLAY_VER(dev_priv, 10)) {
+ if (DISPLAY_VER(dev_priv) == 10) {
/* Display WA #1145: glk,cnl */
min_cdclk = max(316800, min_cdclk);
- } else if (IS_DISPLAY_VER(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(432000, min_cdclk);
}
@@ -2389,44 +2456,6 @@ static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
return 0;
}
-static int intel_modeset_all_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- /*
- * Add all pipes to the state, and force
- * a modeset on all the active ones.
- */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
- int ret;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (!crtc_state->hw.active ||
- drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
- continue;
-
- crtc_state->uapi.mode_changed = true;
-
- ret = drm_atomic_add_affected_connectors(&state->base,
- &crtc->base);
- if (ret)
- return ret;
-
- ret = intel_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
-
- crtc_state->update_planes |= crtc_state->active_planes;
- }
-
- return 0;
-}
-
static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk;
@@ -2499,7 +2528,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_cdclk_state *old_cdclk_state;
struct intel_cdclk_state *new_cdclk_state;
- enum pipe pipe;
+ enum pipe pipe = INVALID_PIPE;
int ret;
new_cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -2551,15 +2580,18 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
pipe = INVALID_PIPE;
- } else {
- pipe = INVALID_PIPE;
}
- if (pipe != INVALID_PIPE) {
+ if (intel_cdclk_can_crawl(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk via crawl\n");
+ } else if (pipe != INVALID_PIPE) {
new_cdclk_state->pipe = pipe;
drm_dbg_kms(&dev_priv->drm,
- "Can change cdclk with pipe %c active\n",
+ "Can change cdclk cd2x divider with pipe %c active\n",
pipe_name(pipe));
} else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
@@ -2568,8 +2600,6 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
- new_cdclk_state->pipe = INVALID_PIPE;
-
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
@@ -2592,7 +2622,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 10)
return 2 * max_cdclk_freq;
- else if (IS_DISPLAY_VER(dev_priv, 9) ||
+ else if (DISPLAY_VER(dev_priv) == 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
@@ -2625,7 +2655,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
dev_priv->max_cdclk_freq = 652800;
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->max_cdclk_freq = 528000;
- } else if (IS_GEN9_BC(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ dev_priv->max_cdclk_freq = 316800;
+ } else if (IS_BROXTON(dev_priv)) {
+ dev_priv->max_cdclk_freq = 624000;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -2647,10 +2681,6 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
max_cdclk = 308571;
dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_GEMINILAKE(dev_priv)) {
- dev_priv->max_cdclk_freq = 316800;
- } else if (IS_BROXTON(dev_priv)) {
- dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
@@ -2848,7 +2878,17 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ROCKETLAKE(dev_priv)) {
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ /* Wa_22011320316:adlp[a0] */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+ dev_priv->cdclk.table = adlp_a_step_cdclk_table;
+ else
+ dev_priv->cdclk.table = adlp_cdclk_table;
+ } else if (IS_ROCKETLAKE(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
@@ -2878,7 +2918,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
dev_priv->cdclk.table = cnl_cdclk_table;
- } else if (IS_GEN9_LP(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
@@ -2887,7 +2927,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->cdclk.table = glk_cdclk_table;
else
dev_priv->cdclk.table = bxt_cdclk_table;
- } else if (IS_GEN9_BC(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) == 9) {
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
@@ -2908,9 +2948,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}
- if (DISPLAY_VER(dev_priv) >= 10 || IS_GEN9_LP(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10 || IS_BROXTON(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
- else if (IS_GEN9_BC(dev_priv))
+ else if (DISPLAY_VER(dev_priv) == 9)
dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.get_cdclk = bdw_get_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index c75d7124d57a..dab892d2251b 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -23,6 +23,7 @@
*/
#include "intel_color.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -225,7 +226,7 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
*/
return crtc_state->limited_color_range &&
(IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_DISPLAY_RANGE(dev_priv, 9, 10));
+ IS_DISPLAY_VER(dev_priv, 9, 10));
}
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
@@ -1711,7 +1712,7 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat
} else {
if (DISPLAY_VER(dev_priv) >= 11)
return icl_gamma_precision(crtc_state);
- else if (IS_DISPLAY_VER(dev_priv, 10))
+ else if (DISPLAY_VER(dev_priv) == 10)
return glk_gamma_precision(crtc_state);
else if (IS_IRONLAKE(dev_priv))
return ilk_gamma_precision(crtc_state);
@@ -2136,7 +2137,7 @@ void intel_color_init(struct intel_crtc *crtc)
if (DISPLAY_VER(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
dev_priv->display.read_luts = icl_read_luts;
- } else if (IS_DISPLAY_VER(dev_priv, 10)) {
+ } else if (DISPLAY_VER(dev_priv) == 10) {
dev_priv->display.load_luts = glk_load_luts;
dev_priv->display.read_luts = glk_read_luts;
} else if (DISPLAY_VER(dev_priv) >= 8) {
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 5df57d16a401..487c54cd5982 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -4,6 +4,7 @@
*/
#include "intel_combo_phy.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#define for_each_combo_phy(__dev_priv, __phy) \
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index d5ceb7bdc14b..9bed1ccecea0 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -282,14 +282,12 @@ void
intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
{
if (!drm_mode_create_hdmi_colorspace_property(connector))
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+ drm_connector_attach_colorspace_property(connector);
}
void
intel_attach_dp_colorspace_property(struct drm_connector *connector)
{
if (!drm_mode_create_dp_colorspace_property(connector))
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+ drm_connector_attach_colorspace_property(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 580d652c3276..648f1c0d3d39 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -36,7 +36,9 @@
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_crt.h"
+#include "intel_crtc.h"
#include "intel_ddi.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
@@ -356,7 +358,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_DISPLAY_RANGE(dev_priv, 3, 4))
+ else if (IS_DISPLAY_VER(dev_priv, 3, 4))
max_clock = 400000;
else
max_clock = 350000;
@@ -711,7 +713,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/* Set the border color to purple. */
intel_uncore_write(uncore, bclrpat_reg, 0x500050);
- if (!IS_DISPLAY_VER(dev_priv, 2)) {
+ if (DISPLAY_VER(dev_priv) != 2) {
u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg);
intel_uncore_write(uncore,
pipeconf_reg,
@@ -1047,7 +1049,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
crt->base.pipe_mask = ~0;
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 39358076c05b..95ff1707b4bd 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -302,11 +302,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
if (IS_CHERRYVIEW(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
funcs = &g4x_crtc_funcs;
- else if (IS_DISPLAY_VER(dev_priv, 4))
+ else if (DISPLAY_VER(dev_priv) == 4)
funcs = &i965_crtc_funcs;
else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
funcs = &i915gm_crtc_funcs;
- else if (IS_DISPLAY_VER(dev_priv, 3))
+ else if (DISPLAY_VER(dev_priv) == 3)
funcs = &i915_crtc_funcs;
else
funcs = &i8xx_crtc_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index 08112d557411..a5ae997581aa 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -18,5 +18,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
struct intel_crtc *crtc);
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_csr.h b/drivers/gpu/drm/i915/display/intel_csr.h
deleted file mode 100644
index 03c64f8af7ab..000000000000
--- a/drivers/gpu/drm/i915/display/intel_csr.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_CSR_H__
-#define __INTEL_CSR_H__
-
-struct drm_i915_private;
-
-#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
-#define CSR_VERSION_MAJOR(version) ((version) >> 16)
-#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
-
-void intel_csr_ucode_init(struct drm_i915_private *i915);
-void intel_csr_load_program(struct drm_i915_private *i915);
-void intel_csr_ucode_fini(struct drm_i915_private *i915);
-void intel_csr_ucode_suspend(struct drm_i915_private *i915);
-void intel_csr_ucode_resume(struct drm_i915_private *i915);
-
-#endif /* __INTEL_CSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 2345f2efd60b..966e020331fb 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -13,6 +13,7 @@
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_cursor.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_display.h"
#include "intel_fb.h"
@@ -382,6 +383,10 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
cntl |= MCURSOR_ROTATE_180;
+ /* Wa_22012358565:adlp */
+ if (DISPLAY_VER(dev_priv) == 13)
+ cntl |= MCURSOR_ARB_SLOTS(1);
+
return cntl;
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 953de42e277c..390869bd6b63 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -31,8 +31,10 @@
#include "intel_audio.h"
#include "intel_combo_phy.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -113,7 +115,8 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
&n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) && intel_bios_encoder_dp_boost_level(encoder->devdata))
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ intel_bios_encoder_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
@@ -146,7 +149,8 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) && intel_bios_encoder_hdmi_boost_level(encoder->devdata))
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ intel_bios_encoder_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
@@ -174,7 +178,7 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
enum port port)
{
/* Wait > 518 usecs for DDI_BUF_CTL to be non-idle */
- if (DISPLAY_VER(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) < 10) {
usleep_range(518, 1000);
return;
}
@@ -245,15 +249,48 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
}
}
+static u32 ddi_buf_phy_link_rate(int port_clock)
+{
+ switch (port_clock) {
+ case 162000:
+ return DDI_BUF_PHY_LINK_RATE(0);
+ case 216000:
+ return DDI_BUF_PHY_LINK_RATE(4);
+ case 243000:
+ return DDI_BUF_PHY_LINK_RATE(5);
+ case 270000:
+ return DDI_BUF_PHY_LINK_RATE(1);
+ case 324000:
+ return DDI_BUF_PHY_LINK_RATE(6);
+ case 432000:
+ return DDI_BUF_PHY_LINK_RATE(7);
+ case 540000:
+ return DDI_BUF_PHY_LINK_RATE(2);
+ case 810000:
+ return DDI_BUF_PHY_LINK_RATE(3);
+ default:
+ MISSING_CASE(port_clock);
+ return DDI_BUF_PHY_LINK_RATE(0);
+ }
+}
+
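Note the selector encoding above is not monotonic in frequency: the four standard DP rates (RBR/HBR/HBR2/HBR3) take values 0-3 and the intermediate eDP rates take 4-7, as can be read off the switch itself. A hypothetical call:

	u32 sel = ddi_buf_phy_link_rate(540000); /* DDI_BUF_PHY_LINK_RATE(2), i.e. HBR2 */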
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
intel_dp->DP = dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(crtc_state->lane_count);
+
+ if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
+ intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
+ if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ }
}
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
@@ -471,7 +508,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
- if (IS_DISPLAY_RANGE(dev_priv, 8, 10) &&
+ if (IS_DISPLAY_VER(dev_priv, 8, 10) &&
crtc_state->master_transcoder != INVALID_TRANSCODER) {
u8 master_select =
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
@@ -546,7 +583,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- if (IS_DISPLAY_RANGE(dev_priv, 8, 10))
+ if (IS_DISPLAY_VER(dev_priv, 8, 10))
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
@@ -759,7 +796,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*is_dp_mst = mst_pipe_mask;
out:
- if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
+ if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) {
tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
@@ -850,18 +887,19 @@ void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ u32 val;
if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_PORT(port));
+ if (DISPLAY_VER(dev_priv) >= 13)
+ val = TGL_TRANS_CLK_SEL_PORT(phy);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
else
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_PORT(port));
+ val = TRANS_CLK_SEL_PORT(encoder->port);
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
}
@@ -974,9 +1012,11 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
+ else if (IS_ALDERLAKE_P(dev_priv))
+ adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
else
tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- } else if (IS_DISPLAY_VER(dev_priv, 11)) {
+ } else if (DISPLAY_VER(dev_priv) == 11) {
if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
@@ -987,7 +1027,7 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans(encoder, crtc_state, &n_entries);
- } else if (IS_GEN9_LP(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_get_buf_trans(encoder, crtc_state, &n_entries);
} else {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
@@ -1420,7 +1460,10 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
return;
- ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
+ if (IS_ALDERLAKE_P(dev_priv))
+ ddi_translations = adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
+ else
+ ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -1454,6 +1497,14 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
+
+ if ((intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->port_clock == 162000) ||
+ (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+ crtc_state->port_clock == 594000))
+ val |= DKL_TX_LOADGEN_SHARING_PMD_DISABLE;
+ else
+ val &= ~DKL_TX_LOADGEN_SHARING_PMD_DISABLE;
}
}
@@ -1555,7 +1606,7 @@ hsw_set_signal_levels(struct intel_dp *intel_dp,
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- if (IS_GEN9_BC(dev_priv))
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
skl_ddi_set_iboost(encoder, crtc_state, level);
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
@@ -2332,8 +2383,8 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
drm_dbg_kms(&i915->drm,
- "Failed to set MSA_TIMING_PAR_IGNORE %s in the sink\n",
- enable ? "enable" : "disable");
+ "Failed to %s MSA_TIMING_PAR_IGNORE in the sink\n",
+ enabledisable(enable));
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -2648,7 +2699,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
icl_ddi_vswing_sequence(encoder, crtc_state, level);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(encoder, crtc_state, level);
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
@@ -2759,7 +2810,6 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
conn_state);
/* FIXME precompute everything properly */
- /* FIXME how do we turn infoframes off again? */
if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -3092,20 +3142,20 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 12)
tgl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_DISPLAY_VER(dev_priv, 11))
+ else if (DISPLAY_VER(dev_priv) == 11)
icl_ddi_vswing_sequence(encoder, crtc_state, level);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(encoder, crtc_state, level);
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
- if (IS_GEN9_BC(dev_priv))
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
skl_ddi_set_iboost(encoder, crtc_state, level);
/* Display WA #1143: skl,kbl,cfl */
- if (IS_GEN9_BC(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
/*
* For some reason these chicken bits have been
* stuffed into a transcoder register, even though
@@ -3144,6 +3194,9 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
+ *
+ * On ADL_P the PHY link rate and lane count must be programmed but
+ * these are both 0 for HDMI.
*/
intel_de_write(dev_priv, DDI_BUF_CTL(port),
dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
@@ -3321,7 +3374,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
* Type-C ports. Skip this step for TBT.
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_ddi_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
}
@@ -3679,7 +3732,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (!pipe_config->bigjoiner_slave)
ddi_dotclock_get(pipe_config);
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
@@ -3705,6 +3758,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
+
+ intel_psr_get_config(encoder, pipe_config);
}
void intel_ddi_get_clock(struct intel_encoder *encoder,
@@ -3885,7 +3940,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
pipe_config->pch_pfit.enabled ||
pipe_config->crc_enabled;
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -4007,9 +4062,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
intel_dp_encoder_flush_work(encoder);
+ intel_display_power_flush_work(i915);
drm_encoder_cleanup(encoder);
if (dig_port)
@@ -4053,7 +4110,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
dig_port->dp.set_signal_levels = icl_set_signal_levels;
else if (IS_CANNONLAKE(dev_priv))
dig_port->dp.set_signal_levels = cnl_set_signal_levels;
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
dig_port->dp.set_signal_levels = bxt_set_signal_levels;
else
dig_port->dp.set_signal_levels = hsw_set_signal_levels;
@@ -4296,7 +4353,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
* supported configuration
*/
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
return true;
/* Cannonlake: Most of SKUs don't support DDI_E, and the only
@@ -4350,6 +4407,17 @@ static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
i915->hti_state & HDPORT_DDI_USED(phy);
}
+static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port >= PORT_D_XELPD)
+ return HPD_PORT_D + port - PORT_D_XELPD;
+ else if (port >= PORT_TC1)
+ return HPD_PORT_TC1 + port - PORT_TC1;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
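The resulting pin assignment follows directly from the arithmetic above; for illustration:

	xelpd_hpd_pin(i915, PORT_A);       /* HPD_PORT_A */
	xelpd_hpd_pin(i915, PORT_TC1);     /* HPD_PORT_TC1 */
	xelpd_hpd_pin(i915, PORT_D_XELPD); /* HPD_PORT_D */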
static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -4489,7 +4557,13 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) {
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ "DDI %c/PHY %c",
+ port_name(port - PORT_D_XELPD + PORT_D),
+ phy_name(phy));
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
@@ -4585,10 +4659,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->disable_clock = cnl_ddi_disable_clock;
encoder->is_clock_enabled = cnl_ddi_is_clock_enabled;
encoder->get_config = cnl_ddi_get_config;
- } else if (IS_GEN9_LP(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
/* BXT/GLK have fixed PLL->port mapping */
encoder->get_config = bxt_ddi_get_config;
- } else if (IS_GEN9_BC(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) == 9) {
encoder->enable_clock = skl_ddi_enable_clock;
encoder->disable_clock = skl_ddi_disable_clock;
encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
@@ -4600,7 +4674,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_config = hsw_ddi_get_config;
}
- if (IS_DG1(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 13)
+ encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
+ else if (IS_DG1(dev_priv))
encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
else if (IS_ROCKETLAKE(dev_priv))
encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
@@ -4608,11 +4684,11 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
else if (IS_JSL_EHL(dev_priv))
encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
- else if (IS_DISPLAY_VER(dev_priv, 11))
+ else if (DISPLAY_VER(dev_priv) == 11)
encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
- else if (IS_DISPLAY_VER(dev_priv, 10))
+ else if (IS_CANNONLAKE(dev_priv))
encoder->hpd_pin = cnl_hpd_pin(dev_priv, port);
- else if (IS_DISPLAY_VER(dev_priv, 9))
+ else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
else
encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
@@ -4654,9 +4730,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
dig_port->hpd_pulse = intel_dp_hpd_pulse;
- /* Splitter enable for eDP MSO is supported for pipe A only. */
- if (dig_port->dp.mso_link_count)
+ /* Splitter enable for eDP MSO is limited to certain pipes. */
+ if (dig_port->dp.mso_link_count) {
encoder->pipe_mask = BIT(PIPE_A);
+ if (IS_ALDERLAKE_P(dev_priv))
+ encoder->pipe_mask |= BIT(PIPE_B);
+ }
}
/* In theory we don't need the encoder->type check, but leave it just in
@@ -4672,7 +4751,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
else
dig_port->connected = lpt_digital_port_connected;
} else if (DISPLAY_VER(dev_priv) >= 8) {
- if (port == PORT_A || IS_GEN9_LP(dev_priv))
+ if (port == PORT_A || IS_GEMINILAKE(dev_priv) ||
+ IS_BROXTON(dev_priv))
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 5d9ce6042e87..8bfd00f49f2a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
#include "intel_display_types.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
@@ -734,6 +735,34 @@ static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
+static const struct tgl_dkl_phy_ddi_buf_trans adlp_dkl_phy_dp_ddi_trans_hbr[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x01 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x06 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x17 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x04 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0A }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x06 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x09 }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB */
+};
+
bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
{
return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
@@ -881,7 +910,7 @@ intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_GEN9_BC(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
skl_get_buf_trans_edp(encoder, n_entries);
*n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
@@ -919,7 +948,7 @@ intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_GEN9_BC(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
return skl_get_buf_trans_hdmi(dev_priv, n_entries);
} else if (IS_BROADWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
@@ -1347,6 +1376,31 @@ tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
}
+static const struct tgl_dkl_phy_ddi_buf_trans *
+adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3);
+ return adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3;
+ }
+
+ *n_entries = ARRAY_SIZE(adlp_dkl_phy_dp_ddi_trans_hbr);
+ return adlp_dkl_phy_dp_ddi_trans_hbr;
+}
+
+const struct tgl_dkl_phy_ddi_buf_trans *
+adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else
+ return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
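Table selection sketch (using the standard DP link-rate values as an assumption): HDMI keeps the TGL HDMI table, while DP picks by port clock:

	int n_entries;

	/* port_clock <= 270000 (RBR/HBR)   -> adlp_dkl_phy_dp_ddi_trans_hbr */
	/* port_clock  > 270000 (HBR2/HBR3) -> adlp_dkl_phy_dp_ddi_trans_hbr2_hbr3 */
	adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);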
int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *default_entry)
@@ -1361,7 +1415,7 @@ int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
else
tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries);
*default_entry = n_entries - 1;
- } else if (IS_DISPLAY_VER(dev_priv, 11)) {
+ } else if (DISPLAY_VER(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
else
@@ -1370,10 +1424,10 @@ int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
} else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans_hdmi(encoder, &n_entries);
*default_entry = n_entries - 1;
- } else if (IS_GEN9_LP(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_get_buf_trans_hdmi(encoder, &n_entries);
*default_entry = n_entries - 1;
- } else if (IS_GEN9_BC(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) == 9) {
intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
*default_entry = 8;
} else if (IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index f8f0ef87e977..4c2efab38642 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -67,6 +67,10 @@ bxt_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
+const struct tgl_dkl_phy_ddi_buf_trans *
+adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
const struct cnl_ddi_buf_trans *
tgl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 00da10bf35f5..9d8c177aa228 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -8,6 +8,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_trace.h"
#include "intel_uncore.h"
static inline u32
@@ -22,26 +23,12 @@ intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
intel_uncore_posting_read(&i915->uncore, reg);
}
-/* Note: read the warnings for intel_uncore_*_fw() functions! */
-static inline u32
-intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
-{
- return intel_uncore_read_fw(&i915->uncore, reg);
-}
-
static inline void
intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
{
intel_uncore_write(&i915->uncore, reg, val);
}
-/* Note: read the warnings for intel_uncore_*_fw() functions! */
-static inline void
-intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
-{
- intel_uncore_write_fw(&i915->uncore, reg, val);
-}
-
static inline void
intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
{
@@ -69,4 +56,30 @@ intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
}
+/*
+ * Unlocked mmio-accessors, think carefully before using these.
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
+ */
+static inline u32
+intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ u32 val;
+
+ val = intel_uncore_read_fw(&i915->uncore, reg);
+ trace_i915_reg_rw(false, reg, val, sizeof(val), true);
+
+ return val;
+}
+
+static inline void
+intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ trace_i915_reg_rw(true, reg, val, sizeof(val), true);
+ intel_uncore_write_fw(&i915->uncore, reg, val);
+}
+
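A usage sketch of the serialisation the comment above asks for (a common i915 pattern, shown here as an assumption rather than part of this patch):

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, reg, val);   /* unlocked mmio write, still traced */
	spin_unlock_irq(&dev_priv->uncore.lock);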
#endif /* __INTEL_DE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 64e9107d70f7..3bad4e00f7be 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -63,9 +63,11 @@
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
#include "gt/intel_rps.h"
+#include "gt/gen8_ppgtt.h"
#include "g4x_dp.h"
#include "g4x_hdmi.h"
@@ -77,8 +79,9 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
-#include "intel_csr.h"
+#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
@@ -122,6 +125,182 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+struct i915_dpt {
+ struct i915_address_space vm;
+
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+};
+
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+
+static inline struct i915_dpt *
+i915_vm_to_dpt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
+ GEM_BUG_ON(!i915_is_dpt(vm));
+ return container_of(vm, struct i915_dpt, vm);
+}
+
+#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void dpt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+
+ gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
+ vm->pte_encode(addr, level, flags));
+}
+
+static void dpt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+ const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read-only page.
+ */
+
+ i = vma->node.start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+ gen8_set_pte(&base[i++], pte_encode | addr);
+}
+
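In other words (a sketch of the loop above): one 64-bit PTE per framebuffer page, placed at the DPT index derived from the vma's start offset:

	/* fb page i -> base[vma->node.start / I915_GTT_PAGE_SIZE + i]
	 *            = pte_encode | dma_addr(page i)
	 */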
+static void dpt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void dpt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ u32 pte_flags;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
+ pte_flags |= PTE_READ_ONLY;
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
+
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+}
+
+static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+{
+ vm->clear_range(vm, vma->node.start, vma->size);
+}
+
+static void dpt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_gem_object_put(dpt->obj);
+}
+
+static struct i915_address_space *
+intel_dpt_create(struct intel_framebuffer *fb)
+{
+ struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
+ struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct drm_i915_gem_object *dpt_obj;
+ struct i915_address_space *vm;
+ struct i915_dpt *dpt;
+ size_t size;
+ int ret;
+
+ if (intel_fb_needs_pot_stride_remap(fb))
+ size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
+ else
+ size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
+
+ size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
+
+ if (HAS_LMEM(i915))
+ dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
+ else
+ dpt_obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(dpt_obj))
+ return ERR_CAST(dpt_obj);
+
+ ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ if (ret) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(ret);
+ }
+
+ dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
+ if (!dpt) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vm = &dpt->vm;
+
+ vm->gt = &i915->gt;
+ vm->i915 = i915;
+ vm->dma = i915->drm.dev;
+ vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ vm->is_dpt = true;
+
+ i915_address_space_init(vm, VM_CLASS_DPT);
+
+ vm->insert_page = dpt_insert_page;
+ vm->clear_range = dpt_clear_range;
+ vm->insert_entries = dpt_insert_entries;
+ vm->cleanup = dpt_cleanup;
+
+ vm->vma_ops.bind_vma = dpt_bind_vma;
+ vm->vma_ops.unbind_vma = dpt_unbind_vma;
+ vm->vma_ops.set_pages = ggtt_set_pages;
+ vm->vma_ops.clear_pages = clear_pages;
+
+ vm->pte_encode = gen8_ggtt_pte_encode;
+
+ dpt->obj = dpt_obj;
+
+ return &dpt->vm;
+}
+
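Worked sizing example (hypothetical 32 MiB framebuffer, 4 KiB GTT pages, 8-byte PTEs):

	/* size      = DIV_ROUND_UP_ULL(32 MiB, 4096)            = 8192 PTEs
	 * dpt_obj   = round_up(8192 * sizeof(gen8_pte_t), 4096) = 64 KiB
	 * vm->total = (65536 / sizeof(gen8_pte_t)) * 4096       = 32 MiB
	 */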
+static void intel_dpt_destroy(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_vm_close(&dpt->vm);
+}
+
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
@@ -230,7 +409,7 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
u32 line1, line2;
u32 line_mask;
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
@@ -796,6 +975,11 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
+ /* Wa_22012358565:adlp */
+ if (DISPLAY_VER(dev_priv) == 13)
+ intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
+ 0, PIPE_ARB_USE_PROG_SLOTS);
+
reg = PIPECONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
@@ -874,7 +1058,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
case DRM_FORMAT_MOD_LINEAR:
return intel_tile_size(dev_priv);
case I915_FORMAT_MOD_X_TILED:
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
return 128;
else
return 512;
@@ -889,7 +1073,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
- if (IS_DISPLAY_VER(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
+ if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
@@ -972,10 +1156,29 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ if (intel_fb_uses_dpt(fb))
+ return 512 * 4096;
+
/* AUX_DIST needs only 4K alignment */
- if ((DISPLAY_VER(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
- is_ccs_plane(fb, color_plane))
+ if (is_ccs_plane(fb, color_plane))
+ return 4096;
+
+ if (is_semiplanar_uv_plane(fb, color_plane)) {
+ /*
+ * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
+ * alignment for linear UV planes on all platforms.
+ */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return intel_linear_alignment(dev_priv);
+
+ return intel_tile_row_size(fb, color_plane);
+ }
+
return 4096;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, color_plane != 0);
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -985,19 +1188,12 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (is_semiplanar_uv_plane(fb, color_plane))
- return intel_tile_row_size(fb, color_plane);
- fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
- if (DISPLAY_VER(dev_priv) >= 12 &&
- is_semiplanar_uv_plane(fb, color_plane))
- return intel_tile_row_size(fb, color_plane);
- fallthrough;
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
@@ -1016,6 +1212,62 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
+static struct i915_vma *
+intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags,
+ struct i915_address_space *vm)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_vma *vma;
+ u32 alignment;
+ int ret;
+
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
+ alignment = 4096 * 512;
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma))
+ goto err;
+
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+ }
+
+ ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+ i915_gem_object_flush_if_display(obj);
+
+ i915_vma_get(vma);
+err:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+ return vma;
+}
+
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
@@ -1253,6 +1505,9 @@ static const struct drm_format_info gen12_ccs_formats[] = {
{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV12, .num_planes = 4,
.char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
@@ -1335,6 +1590,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
struct intel_plane *plane;
+ if (!HAS_DISPLAY(dev_priv))
+ return 0;
+
/*
* We assume the primary plane for pipe A has
* the highest stride limits of them all,
@@ -1360,14 +1618,13 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
*
* The new CCS hash mode makes remapping impossible
*/
- if (!is_ccs_modifier(modifier)) {
- if (DISPLAY_VER(dev_priv) >= 7)
- return 256*1024;
- else if (DISPLAY_VER(dev_priv) >= 4)
- return 128*1024;
- }
-
- return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+ if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
+ intel_modifier_uses_dpt(dev_priv, modifier))
+ return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ return 256 * 1024;
+ else
+ return 128 * 1024;
}
static u32
@@ -1403,7 +1660,7 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
- if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
color_plane == 0 && fb->width > 3840)
tile_width *= 4;
/*
@@ -1438,7 +1695,8 @@ initial_plane_vma(struct drm_i915_private *i915,
* important and we should probably use that space with FBC or other
* features.
*/
- if (size * 2 > i915->stolen_usable_size)
+ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ size * 2 > i915->stolen_usable_size)
return NULL;
obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
@@ -1606,13 +1864,56 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (IS_DISPLAY_VER(dev_priv, 2) && !crtc_state->active_planes)
+ if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
intel_disable_plane(plane, crtc_state);
intel_wait_for_vblank(dev_priv, crtc->pipe);
}
+static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
+{
+ struct drm_i915_private *i915 = vm->i915;
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ atomic_inc(&i915->gpu_error.pending_fb_pin);
+
+ vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
+ HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ goto err;
+
+ iomem = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(iomem)) {
+ vma = ERR_CAST(iomem);
+ goto err;
+ }
+
+ dpt->vma = vma;
+ dpt->iomem = iomem;
+
+ i915_vma_get(vma);
+
+err:
+ atomic_dec(&i915->gpu_error.pending_fb_pin);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return vma;
+}
+
+static void intel_dpt_unpin(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_vma_unpin_iomap(dpt->vma);
+ i915_vma_put(dpt->vma);
+}
+
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -1658,12 +1959,12 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
continue;
state = to_intel_plane_state(c->primary->state);
- if (!state->vma)
+ if (!state->ggtt_vma)
continue;
if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = state->hw.fb;
- vma = state->vma;
+ vma = state->ggtt_vma;
goto valid_fb;
}
}
@@ -1690,7 +1991,7 @@ valid_fb:
&intel_state->view);
__i915_vma_pin(vma);
- intel_state->vma = i915_vma_get(vma);
+ intel_state->ggtt_vma = i915_vma_get(vma);
if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
if (vma->fence)
intel_state->flags |= PLANE_HAS_FENCE;
@@ -1913,6 +2214,21 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* across pipe
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
+
+ /*
+ * "The underrun recovery mechanism should be disabled
+ * when the following is enabled for this pipe:
+ * WiDi
+ * Downscaling (this includes YUV420 fullblend)
+ * COG
+ * DSC
+ * PSR2"
+ *
+ * FIXME: enable whenever possible...
+ */
+ if (IS_ALDERLAKE_P(dev_priv))
+ tmp |= UNDERRUN_RECOVERY_DISABLE;
+
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
@@ -2469,7 +2785,7 @@ static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
return false;
/* WA Display #0827: Gen9:all */
- if (IS_DISPLAY_VER(dev_priv, 9))
+ if (DISPLAY_VER(dev_priv) == 9)
return true;
return false;
@@ -2480,7 +2796,7 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/* Wa_2006604312:icl,ehl */
- if (crtc_state->scaler_state.scaler_users > 0 && IS_DISPLAY_VER(dev_priv, 11))
+ if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
return true;
return false;
@@ -2680,7 +2996,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* chance of catching underruns with the intermediate watermarks
* vs. the old plane configuration.
*/
- if (IS_DISPLAY_VER(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
+ if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
@@ -3116,6 +3432,7 @@ static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(master->base.dev);
struct intel_crtc_state *master_crtc_state;
struct drm_connector_state *conn_state;
struct drm_connector *conn;
@@ -3149,6 +3466,9 @@ static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
/* and DSC on slave */
intel_dsc_enable(NULL, crtc_state);
}
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ intel_uncompressed_joiner_enable(crtc_state);
}
static void hsw_crtc_enable(struct intel_atomic_state *state,
@@ -3199,7 +3519,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
crtc->active = true;
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
- psl_clkgate_wa = IS_DISPLAY_VER(dev_priv, 10) &&
+ psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
new_crtc_state->pch_pfit.enabled;
if (psl_clkgate_wa)
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
@@ -3376,7 +3696,9 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (IS_TIGERLAKE(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv))
+ return phy >= PHY_F && phy <= PHY_I;
+ else if (IS_TIGERLAKE(dev_priv))
return phy >= PHY_D && phy <= PHY_I;
else if (IS_ICELAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
@@ -3386,7 +3708,11 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
- if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
+ if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
+ return PHY_D + port - PORT_D_XELPD;
+ else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
+ return PHY_F + port - PORT_TC1;
+ else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
return PHY_B + port - PORT_TC1;
else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
@@ -3653,7 +3979,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
crtc->active = true;
- if (!IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) != 2)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_encoders_pre_enable(state, crtc);
@@ -3678,7 +4004,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
/* prevents spurious underruns */
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
intel_wait_for_vblank(dev_priv, pipe);
}
@@ -3709,7 +4035,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
intel_wait_for_vblank(dev_priv, pipe);
intel_encoders_disable(state, crtc);
@@ -3733,7 +4059,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_encoders_post_pll_disable(state, crtc);
- if (!IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) != 2)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (!dev_priv->display.initial_watermarks)
@@ -3839,6 +4165,9 @@ int intel_display_suspend(struct drm_device *dev)
struct drm_atomic_state *state;
int ret;
+ if (!HAS_DISPLAY(dev_priv))
+ return 0;
+
state = drm_atomic_helper_suspend(dev);
ret = PTR_ERR_OR_ZERO(state);
if (ret)
@@ -3979,7 +4308,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
- unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
+ struct drm_rect src;
/*
* We only use IF-ID interlacing. If we ever use
@@ -3989,23 +4318,12 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
if (!crtc_state->pch_pfit.enabled)
return pixel_rate;
- pipe_w = crtc_state->pipe_src_w;
- pipe_h = crtc_state->pipe_src_h;
-
- pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
- pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
-
- if (pipe_w < pfit_w)
- pipe_w = pfit_w;
- if (pipe_h < pfit_h)
- pipe_h = pfit_h;
+ drm_rect_init(&src, 0, 0,
+ crtc_state->pipe_src_w << 16,
+ crtc_state->pipe_src_h << 16);
- if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
- !pfit_w || !pfit_h))
- return pixel_rate;
-
- return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
- pfit_w * pfit_h);
+ return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
+ pixel_rate);
}
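/*
 * Editor's sketch, not part of the patch: what the intel_adjusted_rate()
 * call above is expected to compute. The helper lives in
 * intel_atomic_plane.c; names and exact rounding here are assumptions.
 * The src rect is in 16.16 fixed point, and only downscaling raises the
 * effective pixel rate.
 */
static unsigned int sketch_adjusted_rate(const struct drm_rect *src,
					 const struct drm_rect *dst,
					 unsigned int rate)
{
	unsigned int src_w = drm_rect_width(src) >> 16;
	unsigned int src_h = drm_rect_height(src) >> 16;
	unsigned int dst_w = drm_rect_width(dst);
	unsigned int dst_h = drm_rect_height(dst);

	/* upscaling doesn't reduce the rate: clamp dst to the src size */
	dst_w = min(src_w, dst_w);
	dst_h = min(src_h, dst_h);

	return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h),
				dst_w * dst_h);
}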
static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
@@ -4297,7 +4615,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
* Strictly speaking some registers are available before
* gen7, but we only support DRRS on gen7+
*/
- return IS_DISPLAY_VER(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
+ return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
}
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -4444,7 +4762,7 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
return false;
if (DISPLAY_VER(dev_priv) >= 9 ||
@@ -5419,8 +5737,12 @@ static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
+ int i;
switch (crtc_state->pipe_bpp) {
case 18:
@@ -5459,6 +5781,23 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 12)
val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ bool scaler_in_use = false;
+
+ for (i = 0; i < crtc->num_scalers; i++) {
+ if (!scaler_state->scalers[i].in_use)
+ continue;
+
+ scaler_in_use = true;
+ break;
+ }
+
+ intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
+ PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
+ scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
+ PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
+ }
+
intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
@@ -5639,7 +5978,7 @@ static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiates them) so just WARN about this case for now.
*/
- drm_WARN_ON(&dev_priv->drm, IS_DISPLAY_VER(dev_priv, 7) &&
+ drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
(ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
@@ -5952,13 +6291,15 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
- if (IS_GEN9_LP(dev_priv) &&
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
intel_dsc_get_config(pipe_config);
+ if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
+ intel_uncompressed_joiner_get_config(pipe_config);
if (!active) {
/* bigjoiner slave doesn't enable transcoder */
@@ -6322,7 +6663,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
return dev_priv->vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
- else if (!IS_DISPLAY_VER(dev_priv, 2))
+ else if (DISPLAY_VER(dev_priv) != 2)
return 96000;
else
return 48000;
@@ -6355,7 +6696,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (!IS_DISPLAY_VER(dev_priv, 2)) {
+ if (DISPLAY_VER(dev_priv) != 2) {
if (IS_PINEVIEW(dev_priv))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -6870,7 +7211,8 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
crtc_state->pixel_rate);
/* Display WA #1135: BXT:ALL GLK:ALL */
- if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ dev_priv->ipc_enabled)
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -7333,10 +7675,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
- drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
yesno(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
- pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
+ pipe_config->vrr.flipline,
intel_vrr_vmin_vblank_start(pipe_config),
intel_vrr_vmax_vblank_start(pipe_config));
@@ -7972,6 +8315,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
+ if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)", \
+ current_config->name & (mask), \
+ pipe_config->name & (mask)); \
+ ret = false; \
+ } \
+} while (0)
+
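/*
 * Editor's note, not part of the patch: the masked variant exists so a
 * fastset can ignore bits that legitimately change at runtime. The only
 * user added by this patch is the infoframes.enable check further down,
 * which masks out the VSC SDP bit while PSR owns it:
 *
 *	PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
 *				    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
 */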
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
@@ -8260,6 +8613,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
if (bp_gamma)
PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
+
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -8313,7 +8671,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(min_voltage_level);
}
- PIPE_CONF_CHECK_X(infoframes.enable);
+ if (fastset && (current_config->has_psr || pipe_config->has_psr))
+ PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
+ ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
+ else
+ PIPE_CONF_CHECK_X(infoframes.enable);
+
PIPE_CONF_CHECK_X(infoframes.gcp);
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
@@ -8342,6 +8705,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
PIPE_CONF_CHECK_I(vrr.pipeline_full);
+ PIPE_CONF_CHECK_I(vrr.guardband);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
@@ -8447,6 +8811,38 @@ static void verify_wm_state(struct intel_crtc *crtc,
hw_wm_level->lines);
}
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(dev_priv) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(dev_priv) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
/* DDB */
hw_ddb_entry = &hw->ddb_y[plane->id];
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
@@ -8736,6 +9132,44 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
verify_disabled_dpll_state(dev_priv);
}
+int intel_modeset_all_pipes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ /*
+ * Add all pipes to the state, and force
+ * a modeset on all the active ones.
+ */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ continue;
+
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+
+ crtc_state->update_planes |= crtc_state->active_planes;
+ }
+
+ return 0;
+}
+
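/*
 * Editor's sketch, not part of the patch: intel_modeset_all_pipes() is
 * meant for checks on global resources that cannot be reprogrammed on
 * the fly. A hypothetical caller (the real ones live in e.g. the cdclk
 * code; "needs_full_modeset" is an assumed flag):
 */
static int sketch_global_resource_check(struct intel_atomic_state *state,
					bool needs_full_modeset)
{
	/* e.g. the new CDCLK can't be reached without switching off pipes */
	if (needs_full_modeset)
		return intel_modeset_all_pipes(state);

	return 0;
}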
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
@@ -8782,7 +9216,7 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (IS_DISPLAY_VER(dev_priv, 2)) {
+ if (DISPLAY_VER(dev_priv) == 2) {
int vtotal;
vtotal = adjusted_mode.crtc_vtotal;
@@ -9184,7 +9618,6 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
struct intel_crtc *slave, *master;
@@ -9200,15 +9633,15 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
if (!new_crtc_state->bigjoiner)
return 0;
- if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
+ slave = intel_dsc_get_bigjoiner_secondary(crtc);
+ if (!slave) {
DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
"CRTC + 1 to be used, doesn't exist\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
- slave = new_crtc_state->bigjoiner_linked_crtc =
- intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
+ new_crtc_state->bigjoiner_linked_crtc = slave;
slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
master = crtc;
if (IS_ERR(slave_crtc_state))
@@ -9582,6 +10015,9 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ if (intel_any_crtc_needs_modeset(state))
+ any_ms = true;
+
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
@@ -9659,7 +10095,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!IS_DISPLAY_VER(dev_priv, 2) || crtc_state->active_planes)
+ if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
@@ -9688,8 +10124,6 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
/* on skylake this is done by detaching scalers */
if (DISPLAY_VER(dev_priv) >= 9) {
- skl_detach_scalers(new_crtc_state);
-
if (new_crtc_state->pch_pfit.enabled)
skl_pfit_enable(new_crtc_state);
} else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -9715,8 +10149,8 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
icl_set_pipe_chicken(crtc);
}
-static void commit_pipe_config(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void commit_pipe_pre_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
@@ -9734,9 +10168,6 @@ static void commit_pipe_config(struct intel_atomic_state *state,
new_crtc_state->update_pipe)
intel_color_commit(new_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
- skl_detach_scalers(new_crtc_state);
-
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
@@ -9750,6 +10181,23 @@ static void commit_pipe_config(struct intel_atomic_state *state,
dev_priv->display.atomic_update_watermarks(state, crtc);
}
+static void commit_pipe_post_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /*
+ * Disable the scaler(s) after the plane(s) so that we don't
+ * get a catastrophic underrun even if the two operations
+ * end up happening in two different frames.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ !intel_crtc_needs_modeset(new_crtc_state))
+ skl_detach_scalers(new_crtc_state);
+}
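/*
 * Editor's note, not part of the patch: this is where the
 * skl_detach_scalers() calls deleted from intel_pipe_fastset() and from
 * the old commit_pipe_config() above end up; the scalers are now
 * detached after the plane updates rather than before them.
 */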
+
static void intel_enable_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -9801,13 +10249,15 @@ static void intel_update_crtc(struct intel_atomic_state *state,
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
- commit_pipe_config(state, crtc);
+ commit_pipe_pre_planes(state, crtc);
if (DISPLAY_VER(dev_priv) >= 9)
skl_update_planes_on_crtc(state, crtc);
else
i9xx_update_planes_on_crtc(state, crtc);
+ commit_pipe_post_planes(state, crtc);
+
intel_pipe_update_end(new_crtc_state);
/*
@@ -10277,7 +10727,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* chance of catching underruns with the intermediate watermarks
* vs. the new plane configuration.
*/
- if (IS_DISPLAY_VER(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
+ if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (dev_priv->display.optimize_watermarks)
@@ -10541,25 +10991,60 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
plane->id == PLANE_CURSOR &&
INTEL_INFO(dev_priv)->display.cursor_needs_physical;
- vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
- &plane_state->view.gtt,
- intel_plane_uses_fence(plane_state),
- &plane_state->flags);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
+ &plane_state->view.gtt,
+ intel_plane_uses_fence(plane_state),
+ &plane_state->flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = intel_dpt_pin(intel_fb->dpt_vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- plane_state->vma = vma;
+ plane_state->ggtt_vma = vma;
+
+ vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
+ &plane_state->flags, intel_fb->dpt_vm);
+ if (IS_ERR(vma)) {
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ plane_state->ggtt_vma = NULL;
+ return PTR_ERR(vma);
+ }
+
+ plane_state->dpt_vma = vma;
+
+ WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+ }
return 0;
}
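/*
 * Editor's note, not part of the patch: with a DPT framebuffer the
 * pinning above is two-level. The DPT page table itself is pinned into
 * the GGTT (plane_state->ggtt_vma) and the fb pages are then bound into
 * that DPT (plane_state->dpt_vma). intel_plane_unpin_fb() below releases
 * both, dpt_vma first.
 */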
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
+ struct drm_framebuffer *fb = old_plane_state->hw.fb;
struct i915_vma *vma;
- vma = fetch_and_zero(&old_plane_state->vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = fetch_and_zero(&old_plane_state->dpt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ }
}
/**
@@ -10650,7 +11135,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret < 0)
goto unpin_fb;
- fence = dma_resv_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
@@ -10829,7 +11314,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ALDERLAKE_S(dev_priv)) {
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ intel_ddi_init(dev_priv, PORT_TC3);
+ intel_ddi_init(dev_priv, PORT_TC4);
+ } else if (IS_ALDERLAKE_S(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_TC1);
intel_ddi_init(dev_priv, PORT_TC2);
@@ -10856,61 +11348,38 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D);
icl_dsi_init(dev_priv);
- } else if (IS_DISPLAY_VER(dev_priv, 11)) {
+ } else if (DISPLAY_VER(dev_priv) == 11) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
- /*
- * On some ICL SKUs port F is not present. No strap bits for
- * this, so rely on VBT.
- * Work around broken VBTs on SKUs known to have no port F.
- */
- if (IS_ICL_WITH_PORT_F(dev_priv) &&
- intel_bios_is_port_present(dev_priv, PORT_F))
- intel_ddi_init(dev_priv, PORT_F);
-
+ intel_ddi_init(dev_priv, PORT_F);
icl_dsi_init(dev_priv);
- } else if (IS_GEN9_LP(dev_priv)) {
- /*
- * FIXME: Broxton doesn't support port detection via the
- * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
- * detect the ports.
- */
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
-
vlv_dsi_init(dev_priv);
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
} else if (HAS_DDI(dev_priv)) {
- int found;
+ u32 found;
if (intel_ddi_crt_present(dev_priv))
intel_crt_init(dev_priv);
- /*
- * Haswell uses DDI functions to detect digital outputs.
- * On SKL pre-D0 the strap isn't connected. Later SKUs may or
- * may not have it - it was supposed to be fixed by the same
- * time we stopped using straps. Assume it's there.
- */
+ /* Haswell uses DDI functions to detect digital outputs. */
found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
- /* WaIgnoreDDIAStrap: skl */
- if (found || IS_GEN9_BC(dev_priv))
+ if (found)
intel_ddi_init(dev_priv, PORT_A);
- /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
- * register */
- if (HAS_PCH_TGP(dev_priv)) {
- /* W/A due to lack of STRAP config on TGP PCH*/
- found = (SFUSE_STRAP_DDIB_DETECTED |
- SFUSE_STRAP_DDIC_DETECTED |
- SFUSE_STRAP_DDID_DETECTED);
- } else {
- found = intel_de_read(dev_priv, SFUSE_STRAP);
- }
-
+ found = intel_de_read(dev_priv, SFUSE_STRAP);
if (found & SFUSE_STRAP_DDIB_DETECTED)
intel_ddi_init(dev_priv, PORT_B);
if (found & SFUSE_STRAP_DDIC_DETECTED)
@@ -10919,13 +11388,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_D);
if (found & SFUSE_STRAP_DDIF_DETECTED)
intel_ddi_init(dev_priv, PORT_F);
- /*
- * On SKL we don't have a way to detect DDI-E so we rely on VBT.
- */
- if (IS_GEN9_BC(dev_priv) &&
- intel_bios_is_port_present(dev_priv, PORT_E))
- intel_ddi_init(dev_priv, PORT_E);
-
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
@@ -11013,7 +11475,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (IS_PINEVIEW(dev_priv)) {
intel_lvds_init(dev_priv);
intel_crt_init(dev_priv);
- } else if (IS_DISPLAY_RANGE(dev_priv, 3, 4)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
bool found = false;
if (IS_MOBILE(dev_priv))
@@ -11057,7 +11519,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev_priv);
- } else if (IS_DISPLAY_VER(dev_priv, 2)) {
+ } else if (DISPLAY_VER(dev_priv) == 2) {
if (IS_I85X(dev_priv))
intel_lvds_init(dev_priv);
@@ -11082,6 +11544,10 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
+
+ if (intel_fb_uses_dpt(fb))
+ intel_dpt_destroy(intel_fb->dpt_vm);
+
intel_frontbuffer_put(intel_fb->frontbuffer);
kfree(intel_fb);
@@ -11245,13 +11711,36 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
}
+ /* TODO: Add POT stride remapping support for CCS formats as well. */
+ if (IS_ALDERLAKE_P(dev_priv) &&
+ mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
+ !intel_fb_needs_pot_stride_remap(intel_fb) &&
+ !is_power_of_2(mode_cmd->pitches[i])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "plane %d pitch (%d) must be power of two for tiled buffers\n",
+ i, mode_cmd->pitches[i]);
+ goto err;
+ }
+
fb->obj[i] = &obj->base;
}
- ret = intel_fill_fb_info(dev_priv, fb);
+ ret = intel_fill_fb_info(dev_priv, intel_fb);
if (ret)
goto err;
+ if (intel_fb_uses_dpt(fb)) {
+ struct i915_address_space *vm;
+
+ vm = intel_dpt_create(intel_fb);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
+ goto err;
+ }
+
+ intel_fb->dpt_vm = vm;
+ }
+
ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
@@ -11273,11 +11762,20 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
+ struct drm_i915_private *i915;
obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
if (!obj)
return ERR_PTR(-ENOENT);
+ /* object is backed with LMEM for discrete */
+ i915 = to_i915(obj->base.dev);
+ if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ return ERR_PTR(-EREMOTE);
+ }
+
fb = intel_framebuffer_create(obj, &mode_cmd);
i915_gem_object_put(obj);
@@ -11429,6 +11927,9 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
*/
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
intel_init_cdclk_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
@@ -11471,8 +11972,12 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->cdclk.obj.state);
+ struct intel_cdclk_state *cdclk_state;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
intel_update_cdclk(i915);
intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
@@ -11705,8 +12210,6 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->preferred_depth = 24;
mode_config->prefer_shadow = 1;
- mode_config->allow_fb_modifiers = true;
-
mode_config->funcs = &intel_mode_funcs;
mode_config->async_page_flip = has_async_flips(i915);
@@ -11721,7 +12224,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
} else if (DISPLAY_VER(i915) >= 4) {
mode_config->max_width = 8192;
mode_config->max_height = 8192;
- } else if (IS_DISPLAY_VER(i915, 3)) {
+ } else if (DISPLAY_VER(i915) == 3) {
mode_config->max_width = 4096;
mode_config->max_height = 4096;
} else {
@@ -11788,7 +12291,10 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
/* FIXME: completely on the wrong abstraction layer */
intel_power_domains_init_hw(i915, false);
- intel_csr_ucode_init(i915);
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ intel_dmc_ucode_init(i915);
i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
@@ -11796,19 +12302,21 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
i915->framestart_delay = 1; /* 1-4 */
+ i915->window2_delay = 0; /* No DSB so no window2 delay */
+
intel_mode_config_init(i915);
ret = intel_cdclk_init(i915);
if (ret)
- goto cleanup_vga_client_pw_domain_csr;
+ goto cleanup_vga_client_pw_domain_dmc;
ret = intel_dbuf_init(i915);
if (ret)
- goto cleanup_vga_client_pw_domain_csr;
+ goto cleanup_vga_client_pw_domain_dmc;
ret = intel_bw_init(i915);
if (ret)
- goto cleanup_vga_client_pw_domain_csr;
+ goto cleanup_vga_client_pw_domain_dmc;
init_llist_head(&i915->atomic_helper.free_list);
INIT_WORK(&i915->atomic_helper.free_work,
@@ -11820,8 +12328,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
return 0;
-cleanup_vga_client_pw_domain_csr:
- intel_csr_ucode_fini(i915);
+cleanup_vga_client_pw_domain_dmc:
+ intel_dmc_ucode_fini(i915);
intel_power_domains_driver_remove(i915);
intel_vga_unregister(i915);
cleanup_bios:
@@ -11838,6 +12346,9 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
struct intel_crtc *crtc;
int ret;
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
intel_init_pm(i915);
intel_panel_sanitize_ssc(i915);
@@ -11850,13 +12361,11 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
INTEL_NUM_PIPES(i915),
INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
- if (HAS_DISPLAY(i915)) {
- for_each_pipe(i915, pipe) {
- ret = intel_crtc_init(i915, pipe);
- if (ret) {
- intel_mode_config_cleanup(i915);
- return ret;
- }
+ for_each_pipe(i915, pipe) {
+ ret = intel_crtc_init(i915, pipe);
+ if (ret) {
+ intel_mode_config_cleanup(i915);
+ return ret;
}
}
@@ -12610,7 +13119,7 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
* Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
- if (IS_DISPLAY_RANGE(dev_priv, 10, 12))
+ if (IS_DISPLAY_VER(dev_priv, 10, 12))
intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
@@ -12791,6 +13300,9 @@ void intel_display_resume(struct drm_device *dev)
struct drm_modeset_acquire_ctx ctx;
int ret;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
dev_priv->modeset_restore_state = NULL;
if (state)
state->acquire_ctx = &ctx;
@@ -12840,6 +13352,9 @@ static void intel_hpd_poll_fini(struct drm_i915_private *i915)
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
+ if (!HAS_DISPLAY(i915))
+ return;
+
flush_workqueue(i915->flip_wq);
flush_workqueue(i915->modeset_wq);
@@ -12850,6 +13365,9 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
+ if (!HAS_DISPLAY(i915))
+ return;
+
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -12890,7 +13408,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
- intel_csr_ucode_fini(i915);
+ intel_dmc_ucode_fini(i915);
intel_power_domains_driver_remove(i915);
@@ -12949,207 +13467,3 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
acpi_video_unregister();
intel_opregion_unregister(i915);
}
-
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-struct intel_display_error_state {
-
- u32 power_well_driver;
-
- struct intel_cursor_error_state {
- u32 control;
- u32 position;
- u32 base;
- u32 size;
- } cursor[I915_MAX_PIPES];
-
- struct intel_pipe_error_state {
- bool power_domain_on;
- u32 source;
- u32 stat;
- } pipe[I915_MAX_PIPES];
-
- struct intel_plane_error_state {
- u32 control;
- u32 stride;
- u32 size;
- u32 pos;
- u32 addr;
- u32 surface;
- u32 tile_offset;
- } plane[I915_MAX_PIPES];
-
- struct intel_transcoder_error_state {
- bool available;
- bool power_domain_on;
- enum transcoder cpu_transcoder;
-
- u32 conf;
-
- u32 htotal;
- u32 hblank;
- u32 hsync;
- u32 vtotal;
- u32 vblank;
- u32 vsync;
- } transcoder[5];
-};
-
-struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv)
-{
- struct intel_display_error_state *error;
- int transcoders[] = {
- TRANSCODER_A,
- TRANSCODER_B,
- TRANSCODER_C,
- TRANSCODER_D,
- TRANSCODER_EDP,
- };
- int i;
-
- BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
-
- if (!HAS_DISPLAY(dev_priv))
- return NULL;
-
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
- if (error == NULL)
- return NULL;
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- error->power_well_driver = intel_de_read(dev_priv,
- HSW_PWR_WELL_CTL2);
-
- for_each_pipe(dev_priv, i) {
- error->pipe[i].power_domain_on =
- __intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(i));
- if (!error->pipe[i].power_domain_on)
- continue;
-
- error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
- error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
- error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
-
- error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
- error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
- if (DISPLAY_VER(dev_priv) <= 3) {
- error->plane[i].size = intel_de_read(dev_priv,
- DSPSIZE(i));
- error->plane[i].pos = intel_de_read(dev_priv,
- DSPPOS(i));
- }
- if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
- error->plane[i].addr = intel_de_read(dev_priv,
- DSPADDR(i));
- if (DISPLAY_VER(dev_priv) >= 4) {
- error->plane[i].surface = intel_de_read(dev_priv,
- DSPSURF(i));
- error->plane[i].tile_offset = intel_de_read(dev_priv,
- DSPTILEOFF(i));
- }
-
- error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
-
- if (HAS_GMCH(dev_priv))
- error->pipe[i].stat = intel_de_read(dev_priv,
- PIPESTAT(i));
- }
-
- for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
- enum transcoder cpu_transcoder = transcoders[i];
-
- if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
- continue;
-
- error->transcoder[i].available = true;
- error->transcoder[i].power_domain_on =
- __intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(cpu_transcoder));
- if (!error->transcoder[i].power_domain_on)
- continue;
-
- error->transcoder[i].cpu_transcoder = cpu_transcoder;
-
- error->transcoder[i].conf = intel_de_read(dev_priv,
- PIPECONF(cpu_transcoder));
- error->transcoder[i].htotal = intel_de_read(dev_priv,
- HTOTAL(cpu_transcoder));
- error->transcoder[i].hblank = intel_de_read(dev_priv,
- HBLANK(cpu_transcoder));
- error->transcoder[i].hsync = intel_de_read(dev_priv,
- HSYNC(cpu_transcoder));
- error->transcoder[i].vtotal = intel_de_read(dev_priv,
- VTOTAL(cpu_transcoder));
- error->transcoder[i].vblank = intel_de_read(dev_priv,
- VBLANK(cpu_transcoder));
- error->transcoder[i].vsync = intel_de_read(dev_priv,
- VSYNC(cpu_transcoder));
- }
-
- return error;
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-
-void
-intel_display_print_error_state(struct drm_i915_error_state_buf *m,
- struct intel_display_error_state *error)
-{
- struct drm_i915_private *dev_priv = m->i915;
- int i;
-
- if (!error)
- return;
-
- err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- err_printf(m, "PWR_WELL_CTL2: %08x\n",
- error->power_well_driver);
- for_each_pipe(dev_priv, i) {
- err_printf(m, "Pipe [%d]:\n", i);
- err_printf(m, " Power: %s\n",
- onoff(error->pipe[i].power_domain_on));
- err_printf(m, " SRC: %08x\n", error->pipe[i].source);
- err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
-
- err_printf(m, "Plane [%d]:\n", i);
- err_printf(m, " CNTR: %08x\n", error->plane[i].control);
- err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- if (DISPLAY_VER(dev_priv) <= 3) {
- err_printf(m, " SIZE: %08x\n", error->plane[i].size);
- err_printf(m, " POS: %08x\n", error->plane[i].pos);
- }
- if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
- err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
- if (DISPLAY_VER(dev_priv) >= 4) {
- err_printf(m, " SURF: %08x\n", error->plane[i].surface);
- err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
- }
-
- err_printf(m, "Cursor [%d]:\n", i);
- err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
- err_printf(m, " POS: %08x\n", error->cursor[i].position);
- err_printf(m, " BASE: %08x\n", error->cursor[i].base);
- }
-
- for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
- if (!error->transcoder[i].available)
- continue;
-
- err_printf(m, "CPU transcoder: %s\n",
- transcoder_name(error->transcoder[i].cpu_transcoder));
- err_printf(m, " Power: %s\n",
- onoff(error->transcoder[i].power_domain_on));
- err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
- err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
- err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
- err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
- err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
- err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
- err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
- }
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 105294ec2dcc..c9dbaf074d77 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -37,13 +37,13 @@ struct drm_encoder;
struct drm_file;
struct drm_format_info;
struct drm_framebuffer;
-struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
struct drm_i915_private;
struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
+struct i915_address_space;
struct i915_ggtt_view;
struct intel_atomic_state;
struct intel_crtc;
@@ -188,12 +188,13 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
-#define for_each_dbuf_slice_in_mask(__slice, __mask) \
+#define for_each_dbuf_slice(__dev_priv, __slice) \
for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
- for_each_if((BIT(__slice)) & (__mask))
+ for_each_if(INTEL_INFO(__dev_priv)->dbuf.slice_mask & BIT(__slice))
-#define for_each_dbuf_slice(__slice) \
- for_each_dbuf_slice_in_mask(__slice, BIT(I915_MAX_DBUF_SLICES) - 1)
+#define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \
+ for_each_dbuf_slice((__dev_priv), (__slice)) \
+ for_each_if((__mask) & BIT(__slice))
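/*
 * Editor's sketch, not part of the patch: with the reworked macros a
 * caller iterates only the slices the platform actually has, optionally
 * filtered by a runtime mask ("enabled_slices" is a hypothetical
 * variable here):
 *
 *	enum dbuf_slice slice;
 *
 *	for_each_dbuf_slice_in_mask(dev_priv, slice, enabled_slices)
 *		drm_dbg_kms(&dev_priv->drm, "DBUF slice %d in use\n", slice);
 */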
enum port {
PORT_NONE = -1,
@@ -216,6 +217,10 @@ enum port {
PORT_TC5,
PORT_TC6,
+ /* XE_LPD repositions D/E offsets and bitfields */
+ PORT_D_XELPD = PORT_TC5,
+ PORT_E_XELPD,
+
I915_MAX_PORTS
};
@@ -299,6 +304,10 @@ enum aux_ch {
AUX_CH_USBC4,
AUX_CH_USBC5,
AUX_CH_USBC6,
+
+ /* XE_LPD repositions D/E offsets and bitfields */
+ AUX_CH_D_XELPD = AUX_CH_USBC5,
+ AUX_CH_E_XELPD,
};
#define aux_ch_name(a) ((a) + 'A')
@@ -556,9 +565,6 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
-void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
-void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
@@ -597,9 +603,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
- struct dpll *best_clock);
-int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
@@ -616,11 +619,6 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
-struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv);
-void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct intel_display_error_state *error);
-
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier);
@@ -648,6 +646,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
+int intel_modeset_all_pipes(struct intel_atomic_state *state);
/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 564509a4e666..88bb05d5c483 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -7,10 +7,11 @@
#include <drm/drm_fourcc.h>
#include "i915_debugfs.h"
-#include "intel_csr.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
+#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_fbc.h"
#include "intel_hdcp.h"
@@ -531,24 +532,24 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
- struct intel_csr *csr;
+ struct intel_dmc *dmc;
i915_reg_t dc5_reg, dc6_reg = {};
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return -ENODEV;
- csr = &dev_priv->csr;
+ dmc = &dev_priv->dmc;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
- seq_printf(m, "path: %s\n", csr->fw_path);
+ seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
+ seq_printf(m, "path: %s\n", dmc->fw_path);
- if (!csr->dmc_payload)
+ if (!intel_dmc_has_payload(dev_priv))
goto out;
- seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
+ seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
if (DISPLAY_VER(dev_priv) >= 12) {
if (IS_DGFX(dev_priv)) {
@@ -567,10 +568,10 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "DC3CO count: %d\n",
intel_de_read(dev_priv, DMC_DEBUG3));
} else {
- dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
- SKL_CSR_DC3_DC5_COUNT;
- if (!IS_GEN9_LP(dev_priv))
- dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
+ SKL_DMC_DC3_DC5_COUNT;
+ if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
+ dc6_reg = SKL_DMC_DC5_DC6_COUNT;
}
seq_printf(m, "DC3 -> DC5 count: %d\n",
@@ -581,10 +582,10 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
out:
seq_printf(m, "program base: 0x%08x\n",
- intel_de_read(dev_priv, CSR_PROGRAM(0)));
+ intel_de_read(dev_priv, DMC_PROGRAM(0)));
seq_printf(m, "ssp base: 0x%08x\n",
- intel_de_read(dev_priv, CSR_SSP_BASE));
- seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
+ intel_de_read(dev_priv, DMC_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -1339,6 +1340,12 @@ static int i915_lpsp_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ if (DISPLAY_VER(i915) >= 13) {
+ LPSP_STATUS(!intel_lpsp_power_well_enabled(i915,
+ SKL_DISP_PW_2));
+ return 0;
+ }
+
switch (DISPLAY_VER(i915)) {
case 12:
case 11:
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 99126caf5747..4298ae684d7d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -9,9 +9,10 @@
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
-#include "intel_csr.h"
#include "intel_display_power.h"
+#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
@@ -290,8 +291,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
-static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
int pw_idx = power_well->desc->hsw.idx;
@@ -326,6 +326,15 @@ aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
return dig_port;
}
+static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+ const struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+
+ return intel_port_to_phy(i915, dig_port->base.port);
+}
+
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool timeout_expected)
@@ -467,15 +476,13 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
-
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
@@ -507,7 +514,7 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
@@ -550,7 +557,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
- if (IS_DISPLAY_VER(dev_priv, 11) && dig_port->tc_legacy_port)
+ if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
return;
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
@@ -594,7 +601,7 @@ static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
bool is_tbt = power_well->desc->hsw.is_tc_tbt;
@@ -618,11 +625,9 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
* or need to enable AUX on a legacy TypeC port as part of the TC-cold
* exit sequence.
*/
- timeout_expected = is_tbt;
- if (IS_DISPLAY_VER(dev_priv, 11) && dig_port->tc_legacy_port) {
+ timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
+ if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
icl_tc_cold_exit(dev_priv);
- timeout_expected = true;
- }
hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
@@ -644,7 +649,7 @@ static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
@@ -656,11 +661,9 @@ static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(dev_priv, phy))
return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
else if (IS_ICELAKE(dev_priv))
return icl_combo_phy_aux_power_well_enable(dev_priv,
@@ -673,11 +676,9 @@ static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(dev_priv, phy))
return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
else if (IS_ICELAKE(dev_priv))
return icl_combo_phy_aux_power_well_disable(dev_priv,
@@ -709,7 +710,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
* BIOS's own request bits, which are forced-on for these power wells
* when exiting DC5/6.
*/
- if (IS_DISPLAY_VER(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
val |= intel_de_read(dev_priv, regs->bios);
@@ -807,9 +808,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 12)
mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
| DC_STATE_EN_DC9;
- else if (IS_DISPLAY_VER(dev_priv, 11))
+ else if (DISPLAY_VER(dev_priv) == 11)
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
@@ -821,12 +822,15 @@ static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
u32 val;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
drm_dbg_kms(&dev_priv->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->csr.dc_state, val);
- dev_priv->csr.dc_state = val;
+ dev_priv->dmc.dc_state, val);
+ dev_priv->dmc.dc_state = val;
}
/**
@@ -857,9 +861,12 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
u32 val;
u32 mask;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->csr.allowed_dc_mask))
- state &= dev_priv->csr.allowed_dc_mask;
+ state & ~dev_priv->dmc.allowed_dc_mask))
+ state &= dev_priv->dmc.allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -867,16 +874,16 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->csr.dc_state)
+ if ((val & mask) != dev_priv->dmc.dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->csr.dc_state, val & mask);
+ dev_priv->dmc.dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->csr.dc_state = val & mask;
+ dev_priv->dmc.dc_state = val & mask;
}
static u32
@@ -895,7 +902,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->csr.allowed_dc_mask & target_dc_state)
+ if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
@@ -951,15 +958,15 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
intel_pps_unlock_regs_wa(dev_priv);
}
-static void assert_csr_loaded(struct drm_i915_private *dev_priv)
+static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
drm_WARN_ONCE(&dev_priv->drm,
- !intel_de_read(dev_priv, CSR_PROGRAM(0)),
- "CSR program storage start is NULL\n");
- drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
- "CSR SSP Base Not fine\n");
- drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
- "CSR HTP Not fine\n");
+ !intel_de_read(dev_priv, DMC_PROGRAM(0)),
+ "DMC program storage start is NULL\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
+ "DMC SSP Base Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
+ "DMC HTP Not fine\n");
}
static struct i915_power_well *
@@ -1009,7 +1016,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->csr.target_dc_state)
+ if (state == dev_priv->dmc.target_dc_state)
goto unlock;
dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
@@ -1021,7 +1028,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
power_well->desc->ops->enable(dev_priv, power_well);
- dev_priv->csr.target_dc_state = state;
+ dev_priv->dmc.target_dc_state = state;
if (!dc_off_enabled)
power_well->desc->ops->disable(dev_priv, power_well);
@@ -1035,7 +1042,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) == 12)
high_pg = ICL_DISP_PW_3;
else
high_pg = SKL_DISP_PW_2;
@@ -1050,7 +1057,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
"DC5 already programmed to be enabled.\n");
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
- assert_csr_loaded(dev_priv);
+ assert_dmc_loaded(dev_priv);
}
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
@@ -1060,7 +1067,7 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
/* Wa Display #1183: skl,kbl,cfl */
- if (IS_GEN9_BC(dev_priv))
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
@@ -1077,7 +1084,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");
- assert_csr_loaded(dev_priv);
+ assert_dmc_loaded(dev_priv);
}
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
@@ -1087,7 +1094,7 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
/* Wa Display #1183: skl,kbl,cfl */
- if (IS_GEN9_BC(dev_priv))
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
@@ -1174,13 +1181,16 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
@@ -1189,7 +1199,7 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
gen9_assert_dbuf_enabled(dev_priv);
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_verify_ddi_phy_power_wells(dev_priv);
if (DISPLAY_VER(dev_priv) >= 11)
@@ -1210,10 +1220,10 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (!dev_priv->csr.dmc_payload)
+ if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->csr.target_dc_state) {
+ switch (dev_priv->dmc.target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -2255,6 +2265,12 @@ intel_display_power_put_async_work(struct work_struct *work)
fetch_and_zero(&power_domains->async_put_domains[1]);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&new_work_wakeref));
+ } else {
+ /*
+ * Cancel the work that got queued after this one got dequeued,
+ * since here we released the corresponding async-put reference.
+ */
+ cancel_delayed_work(&power_domains->async_put_work);
}
out_verify:
@@ -3012,6 +3028,116 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
+/*
+ * XE_LPD Power Domains
+ *
+ * Previous platforms required that PG(n-1) be enabled before PG(n). That
+ * dependency chain turns into a dependency tree on XE_LPD:
+ *
+ * PG0
+ * |
+ * --PG1--
+ * / \
+ * PGA --PG2--
+ * / | \
+ * PGB PGC PGD
+ *
+ * Power wells must be enabled from top to bottom and disabled from bottom
+ * to top. This allows pipes to be power gated independently.
+ */
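/*
 * Editor's sketch, not part of the patch: the tree above keeps the usual
 * array-order convention, i.e. descriptors in xelpd_power_wells[] are
 * listed parent-first, so enabling walks the table forward (top to
 * bottom) and disabling walks it backward (bottom to top). Schematically,
 * with hypothetical enable()/disable() helpers:
 *
 *	for (i = 0; i < ARRAY_SIZE(power_wells); i++)
 *		enable(&power_wells[i]);	// parents before children
 *	for (i = ARRAY_SIZE(power_wells) - 1; i >= 0; i--)
 *		disable(&power_wells[i]);	// children before parents
 */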
+
+#define XELPD_PW_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_D) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define XELPD_PW_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define XELPD_PW_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define XELPD_PW_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define XELPD_PW_2_POWER_DOMAINS ( \
+ XELPD_PW_B_POWER_DOMAINS | \
+ XELPD_PW_C_POWER_DOMAINS | \
+ XELPD_PW_D_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+/*
+ * XELPD PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (registers are in PW0)
+ * - Transcoder A
+ * - DDI_A and DDI_B
+ *
+ * XELPD PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - Clocks except port PLL
+ * - Shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - Central power except FBC
+ * - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ XELPD_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
+#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
+#define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
+#define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
+#define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
+#define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
+
+#define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
+#define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
+#define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
+#define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
+
+#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
+#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD)
+#define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
+#define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
+#define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
+#define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
+
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -4516,6 +4642,319 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
},
};
+static const struct i915_power_well_desc xelpd_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_DC_OFF,
+ },
+ {
+ .name = "power well 2",
+ .domains = XELPD_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well A",
+ .domains = XELPD_PW_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A,
+ .hsw.irq_pipe_mask = BIT(PIPE_A),
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well B",
+ .domains = XELPD_PW_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well C",
+ .domains = XELPD_PW_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well D",
+ .domains = XELPD_PW_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D,
+ .hsw.irq_pipe_mask = BIT(PIPE_D),
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ }
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ }
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ }
+ },
+ {
+ .name = "DDI IO D_XELPD",
+ .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
+ }
+ },
+ {
+ .name = "DDI IO E_XELPD",
+ .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
+ }
+ },
+ {
+ .name = "DDI IO TC1",
+ .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
+ }
+ },
+ {
+ .name = "DDI IO TC2",
+ .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
+ }
+ },
+ {
+ .name = "DDI IO TC3",
+ .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
+ }
+ },
+ {
+ .name = "DDI IO TC4",
+ .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
+ }
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = TGL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "AUX D_XELPD",
+ .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
+ },
+ },
+ {
+ .name = "AUX E_XELPD",
+ .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
+ },
+ },
+ {
+ .name = "AUX USBC1",
+ .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+ },
+ },
+ {
+ .name = "AUX USBC2",
+ .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
+ },
+ },
+ {
+ .name = "AUX USBC3",
+ .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
+ },
+ },
+ {
+ .name = "AUX USBC4",
+ .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
+ },
+ },
+ {
+ .name = "AUX TBT1",
+ .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT2",
+ .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT3",
+ .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT4",
+ .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -4533,14 +4972,17 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
+ if (!HAS_DISPLAY(dev_priv))
+ return 0;
+
if (IS_DG1(dev_priv))
max_dc = 3;
else if (DISPLAY_VER(dev_priv) >= 12)
max_dc = 4;
- else if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv) || IS_GEN9_BC(dev_priv))
- max_dc = 2;
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
max_dc = 1;
+ else if (DISPLAY_VER(dev_priv) >= 9)
+ max_dc = 2;
else
max_dc = 0;
@@ -4549,7 +4991,8 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
- mask = IS_GEN9_LP(dev_priv) || DISPLAY_VER(dev_priv) >= 11 ?
+ mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ DISPLAY_VER(dev_priv) >= 11 ?
DC_STATE_EN_DC9 : 0;
if (!dev_priv->params.disable_power_well)
@@ -4656,10 +5099,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->csr.allowed_dc_mask =
+ dev_priv->dmc.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->csr.target_dc_state =
+ dev_priv->dmc.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
@@ -4673,14 +5116,19 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
+ if (!HAS_DISPLAY(dev_priv)) {
+ power_domains->power_well_count = 0;
+ err = 0;
+ } else if (DISPLAY_VER(dev_priv) >= 13) {
+ err = set_power_wells(power_domains, xelpd_power_wells);
+ } else if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
err = set_power_wells_mask(power_domains, tgl_power_wells,
BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
} else if (IS_ROCKETLAKE(dev_priv)) {
err = set_power_wells(power_domains, rkl_power_wells);
- } else if (IS_DISPLAY_VER(dev_priv, 12)) {
+ } else if (DISPLAY_VER(dev_priv) == 12) {
err = set_power_wells(power_domains, tgl_power_wells);
- } else if (IS_DISPLAY_VER(dev_priv, 11)) {
+ } else if (DISPLAY_VER(dev_priv) == 11) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CNL_WITH_PORT_F(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -4692,7 +5140,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
err = set_power_wells(power_domains, glk_power_wells);
} else if (IS_BROXTON(dev_priv)) {
err = set_power_wells(power_domains, bxt_power_wells);
- } else if (IS_GEN9_BC(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) == 9) {
err = set_power_wells(power_domains, skl_power_wells);
} else if (IS_CHERRYVIEW(dev_priv)) {
err = set_power_wells(power_domains, chv_power_wells);
@@ -4741,33 +5189,28 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
{
i915_reg_t reg = DBUF_CTL_S(slice);
bool state;
- u32 val;
- val = intel_de_read(dev_priv, reg);
- if (enable)
- val |= DBUF_POWER_REQUEST;
- else
- val &= ~DBUF_POWER_REQUEST;
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
+ enable ? DBUF_POWER_REQUEST : 0);
intel_de_posting_read(dev_priv, reg);
udelay(10);
state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
drm_WARN(&dev_priv->drm, enable != state,
"DBuf slice %d power %s timeout!\n",
- slice, enable ? "enable" : "disable");
+ slice, enabledisable(enable));
}
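The hunk above collapses an open-coded read/modify/write into intel_de_rmw(dev_priv, reg, clear, set). As a reference for the equivalence, a pure-function sketch (userspace demo, not driver code):

	#include <stdint.h>

	/* What the removed code did by hand: clear the masked bits, then OR
	 * in the new ones. Passing DBUF_POWER_REQUEST as both the clear mask
	 * and, when enabling, the set value toggles that single bit in place. */
	static uint32_t rmw(uint32_t old, uint32_t clear, uint32_t set)
	{
		return (old & ~clear) | set;
	}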
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
enum dbuf_slice slice;
- drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
- "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
- req_slices, num_slices);
+ drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
+ "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
+ req_slices, slice_mask);
drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
@@ -4781,7 +5224,7 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
*/
mutex_lock(&power_domains->lock);
- for (slice = DBUF_S1; slice < num_slices; slice++)
+ for_each_dbuf_slice(dev_priv, slice)
gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
dev_priv->dbuf.enabled_slices = req_slices;
@@ -4809,10 +5252,12 @@ static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
- const int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
enum dbuf_slice slice;
- for (slice = DBUF_S1; slice < (DBUF_S1 + num_slices); slice++)
+ if (IS_ALDERLAKE_P(dev_priv))
+ return;
+
+ for_each_dbuf_slice(dev_priv, slice)
intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
DBUF_TRACKER_STATE_SERVICE(8));
@@ -4823,6 +5268,9 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
u32 mask, val, i;
+ if (IS_ALDERLAKE_P(dev_priv))
+ return;
+
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
MBUS_ABOX_B_CREDIT_MASK |
@@ -4837,7 +5285,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
* expect us to program the abox_ctl0 register as well, even though
* we don't have to program other instance-0 registers like BW_BUDDY.
*/
- if (IS_DISPLAY_VER(dev_priv, 12))
+ if (DISPLAY_VER(dev_priv) == 12)
abox_regs |= BIT(0);
for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
@@ -5122,6 +5570,9 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
/* enable PCH reset handshake */
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
@@ -5137,8 +5588,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
gen9_dbuf_enable(dev_priv);
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (resume && intel_dmc_has_payload(dev_priv))
+ intel_dmc_load_program(dev_priv);
}
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -5146,6 +5597,9 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
gen9_disable_dc_states(dev_priv);
gen9_dbuf_disable(dev_priv);
@@ -5186,6 +5640,9 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
*/
intel_pch_reset_handshake(dev_priv, false);
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
/* Enable PG1 */
mutex_lock(&power_domains->lock);
@@ -5198,8 +5655,8 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
gen9_dbuf_enable(dev_priv);
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (resume && intel_dmc_has_payload(dev_priv))
+ intel_dmc_load_program(dev_priv);
}
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -5207,6 +5664,9 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
gen9_disable_dc_states(dev_priv);
gen9_dbuf_disable(dev_priv);
@@ -5240,6 +5700,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
/* 1. Enable PCH Reset Handshake */
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
/* 2-3. */
intel_combo_phy_init(dev_priv);
@@ -5258,8 +5721,8 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (resume && intel_dmc_has_payload(dev_priv))
+ intel_dmc_load_program(dev_priv);
}
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -5267,6 +5730,9 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
gen9_disable_dc_states(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -5381,6 +5847,9 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 1. Enable PCH reset handshake. */
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
/* 2. Initialize all combo phys */
intel_combo_phy_init(dev_priv);
@@ -5409,15 +5878,19 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 12)
tgl_bw_buddy_init(dev_priv);
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (resume && intel_dmc_has_payload(dev_priv))
+ intel_dmc_load_program(dev_priv);
/* Wa_14011508470 */
- if (IS_DISPLAY_VER(dev_priv, 12)) {
+ if (DISPLAY_VER(dev_priv) == 12) {
val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
}
+
+ /* Wa_14011503030:xelpd */
+ if (DISPLAY_VER(dev_priv) >= 13)
+ intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -5425,6 +5898,9 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
gen9_disable_dc_states(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -5623,10 +6099,10 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
cnl_display_core_init(i915, resume);
- } else if (IS_GEN9_BC(i915)) {
- skl_display_core_init(i915, resume);
- } else if (IS_GEN9_LP(i915)) {
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_display_core_init(i915, resume);
+ } else if (DISPLAY_VER(i915) == 9) {
+ skl_display_core_init(i915, resume);
} else if (IS_CHERRYVIEW(i915)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(i915);
@@ -5757,13 +6233,13 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
/*
* In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
* support don't manually deinit the power domains. This also means the
- * CSR/DMC firmware will stay active, it will power down any HW
+ * DMC firmware will stay active; it will power down any HW
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
- i915->csr.dmc_payload) {
+ intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
return;
@@ -5784,10 +6260,10 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
icl_display_core_uninit(i915);
else if (IS_CANNONLAKE(i915))
cnl_display_core_uninit(i915);
- else if (IS_GEN9_BC(i915))
- skl_display_core_uninit(i915);
- else if (IS_GEN9_LP(i915))
+ else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
bxt_display_core_uninit(i915);
+ else if (DISPLAY_VER(i915) == 9)
+ skl_display_core_uninit(i915);
power_domains->display_core_suspended = true;
}
@@ -5908,7 +6384,8 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 11 || IS_GEN9_LP(i915)) {
+ if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
+ IS_BROXTON(i915)) {
bxt_enable_dc9(i915);
/* Tweaked Wa_14010685332:icp,jsp,mcc */
if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
@@ -5921,7 +6398,8 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 11 || IS_GEN9_LP(i915)) {
+ if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
+ IS_BROXTON(i915)) {
gen9_sanitize_dc_state(i915);
bxt_disable_dc9(i915);
/* Tweaked Wa_14010685332:icp,jsp,mcc */
@@ -5938,7 +6416,7 @@ void intel_display_power_suspend(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 11) {
icl_display_core_uninit(i915);
bxt_enable_dc9(i915);
- } else if (IS_GEN9_LP(i915)) {
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_display_core_uninit(i915);
bxt_enable_dc9(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
@@ -5951,19 +6429,19 @@ void intel_display_power_resume(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 11) {
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
- if (i915->csr.dmc_payload) {
- if (i915->csr.allowed_dc_mask &
+ if (intel_dmc_has_payload(i915)) {
+ if (i915->dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->csr.allowed_dc_mask &
+ else if (i915->dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
- } else if (IS_GEN9_LP(i915)) {
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
- if (i915->csr.dmc_payload &&
- (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ if (intel_dmc_has_payload(i915) &&
+ (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index f3ca5d5c9778..4f0917df4375 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -49,6 +49,9 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_LANES_TC5,
POWER_DOMAIN_PORT_DDI_LANES_TC6,
+ POWER_DOMAIN_PORT_DDI_LANES_D_XELPD = POWER_DOMAIN_PORT_DDI_LANES_TC5, /* XELPD */
+ POWER_DOMAIN_PORT_DDI_LANES_E_XELPD,
+
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
@@ -66,6 +69,9 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_IO_TC5,
POWER_DOMAIN_PORT_DDI_IO_TC6,
+ POWER_DOMAIN_PORT_DDI_IO_D_XELPD = POWER_DOMAIN_PORT_DDI_IO_TC5, /* XELPD */
+ POWER_DOMAIN_PORT_DDI_IO_E_XELPD,
+
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -88,6 +94,9 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_USBC5,
POWER_DOMAIN_AUX_USBC6,
+ POWER_DOMAIN_AUX_D_XELPD = POWER_DOMAIN_AUX_USBC5, /* XELPD */
+ POWER_DOMAIN_AUX_E_XELPD,
+
POWER_DOMAIN_AUX_IO_A,
POWER_DOMAIN_AUX_C_TBT,
POWER_DOMAIN_AUX_D_TBT,
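The three hunks above add the XE_LPD combo port D/E domains by aliasing them onto the TC5/TC6 enumerators rather than appending new entries, which keeps POWER_DOMAIN_NUM within the 64 bits a BIT_ULL() mask can hold. A standalone illustration of the aliasing (demo names only):

	enum demo_domain {
		DEMO_AUX_USBC5,
		DEMO_AUX_USBC6,
		/* XE_LPD exposes combo D/E instead of TC5/TC6: reuse the slots */
		DEMO_AUX_D_XELPD = DEMO_AUX_USBC5,
		DEMO_AUX_E_XELPD,		/* == DEMO_AUX_USBC6 */
	};

	_Static_assert(DEMO_AUX_E_XELPD == DEMO_AUX_USBC6,
		       "aliases share bit positions");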
@@ -380,6 +389,8 @@ intel_display_power_put_all_in_set(struct drm_i915_private *i915,
enum dbuf_slice {
DBUF_S1,
DBUF_S2,
+ DBUF_S3,
+ DBUF_S4,
I915_MAX_DBUF_SLICES
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e2e707c4dff5..04613864cbe8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -45,7 +45,6 @@
#include <media/cec-notifier.h>
#include "i915_drv.h"
-#include "intel_de.h"
struct drm_printer;
struct __intel_global_objs_state;
@@ -127,8 +126,12 @@ struct intel_framebuffer {
/* Params to remap the FB pages and program the plane registers in each view. */
struct intel_fb_view normal_view;
- struct intel_fb_view rotated_view;
- struct intel_fb_view remapped_view;
+ union {
+ struct intel_fb_view rotated_view;
+ struct intel_fb_view remapped_view;
+ };
+
+ struct i915_address_space *dpt_vm;
};
struct intel_fbdev {
@@ -611,7 +614,8 @@ struct intel_plane_state {
enum drm_scaling_filter scaling_filter;
} hw;
- struct i915_vma *vma;
+ struct i915_vma *ggtt_vma;
+ struct i915_vma *dpt_vma;
unsigned long flags;
#define PLANE_HAS_FENCE BIT(0)
@@ -1198,7 +1202,7 @@ struct intel_crtc_state {
struct {
bool enable;
u8 pipeline_full;
- u16 flipline, vmin, vmax;
+ u16 flipline, vmin, vmax, guardband;
} vrr;
/* Stream Splitter for eDP MSO */
@@ -1478,6 +1482,7 @@ struct intel_psr {
bool sink_support;
bool source_support;
bool enabled;
+ bool paused;
enum pipe pipe;
enum transcoder transcoder;
bool active;
@@ -1494,7 +1499,7 @@ struct intel_psr {
bool sink_not_reliable;
bool irq_aux_error;
u16 su_x_granularity;
- bool dc3co_enabled;
+ u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
struct drm_dp_vsc_sdp vsc;
@@ -1718,6 +1723,14 @@ vlv_pipe_to_channel(enum pipe pipe)
}
}
+static inline bool intel_pipe_valid(struct drm_i915_private *i915, enum pipe pipe)
+{
+ return (pipe >= 0 &&
+ pipe < ARRAY_SIZE(i915->pipe_to_crtc_mapping) &&
+ INTEL_INFO(i915)->pipe_mask & BIT(pipe) &&
+ i915->pipe_to_crtc_mapping[pipe]);
+}
+
static inline struct intel_crtc *
intel_get_first_crtc(struct drm_i915_private *dev_priv)
{
@@ -1973,9 +1986,19 @@ intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pip
intel_wait_for_vblank(dev_priv, pipe);
}
-static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+static inline bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
+{
+ return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR;
+}
+
+static inline bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
+{
+ return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
+}
+
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
{
- return i915_ggtt_offset(state->vma);
+ return i915_ggtt_offset(plane_state->ggtt_vma);
}
static inline struct intel_frontbuffer *
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 794efcc3ca08..97308da28059 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -26,67 +26,72 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_csr.h"
#include "intel_de.h"
+#include "intel_dmc.h"
/**
- * DOC: csr support for dmc
+ * DOC: DMC Firmware Support
*
- * Display Context Save and Restore (CSR) firmware support added from gen9
- * onwards to drive newly added DMC (Display microcontroller) in display
+ * From gen9 onwards we have a newly added DMC (Display Microcontroller) in display
 * engine to save and restore its state when it enters a
* low-power state and comes back to normal.
*/
-#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-
-#define ADLS_CSR_PATH "i915/adls_dmc_ver2_01.bin"
-#define ADLS_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
-MODULE_FIRMWARE(ADLS_CSR_PATH);
-
-#define DG1_CSR_PATH "i915/dg1_dmc_ver2_02.bin"
-#define DG1_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
-MODULE_FIRMWARE(DG1_CSR_PATH);
-
-#define RKL_CSR_PATH "i915/rkl_dmc_ver2_02.bin"
-#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
-MODULE_FIRMWARE(RKL_CSR_PATH);
-
-#define TGL_CSR_PATH "i915/tgl_dmc_ver2_08.bin"
-#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 8)
-MODULE_FIRMWARE(TGL_CSR_PATH);
-
-#define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin"
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 9)
-#define ICL_CSR_MAX_FW_SIZE 0x6000
-MODULE_FIRMWARE(ICL_CSR_PATH);
-
-#define CNL_CSR_PATH "i915/cnl_dmc_ver1_07.bin"
-#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE
-MODULE_FIRMWARE(CNL_CSR_PATH);
-
-#define GLK_CSR_PATH "i915/glk_dmc_ver1_04.bin"
-#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define GLK_CSR_MAX_FW_SIZE 0x4000
-MODULE_FIRMWARE(GLK_CSR_PATH);
-
-#define KBL_CSR_PATH "i915/kbl_dmc_ver1_04.bin"
-#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
-MODULE_FIRMWARE(KBL_CSR_PATH);
-
-#define SKL_CSR_PATH "i915/skl_dmc_ver1_27.bin"
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
-#define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
-MODULE_FIRMWARE(SKL_CSR_PATH);
-
-#define BXT_CSR_PATH "i915/bxt_dmc_ver1_07.bin"
-#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define BXT_CSR_MAX_FW_SIZE 0x3000
-MODULE_FIRMWARE(BXT_CSR_PATH);
-
-#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
+#define DMC_PATH(platform, major, minor) \
+ "i915/" \
+ __stringify(platform) "_dmc_ver" \
+ __stringify(major) "_" \
+ __stringify(minor) ".bin"
+
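The DMC_PATH() macro reproduces, via preprocessor stringification and string-literal concatenation, exactly the literals the removed *_CSR_PATH defines spelled out by hand. A small userspace demo (the __stringify pair mimics the kernel's <linux/stringify.h>):

	#include <stdio.h>

	#define __stringify_1(x)	#x
	#define __stringify(x)		__stringify_1(x)

	#define DMC_PATH(platform, major, minor) \
		"i915/" \
		__stringify(platform) "_dmc_ver" \
		__stringify(major) "_" \
		__stringify(minor) ".bin"

	int main(void)
	{
		/* The 02 keeps its leading zero because it is stringified,
		 * never evaluated as a number: prints
		 * "i915/rkl_dmc_ver2_02.bin", matching the removed literal. */
		puts(DMC_PATH(rkl, 2, 02));
		return 0;
	}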
+#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+
+#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
+#define ADLS_DMC_VERSION_REQUIRED DMC_VERSION(2, 1)
+MODULE_FIRMWARE(ADLS_DMC_PATH);
+
+#define DG1_DMC_PATH DMC_PATH(dg1, 2, 02)
+#define DG1_DMC_VERSION_REQUIRED DMC_VERSION(2, 2)
+MODULE_FIRMWARE(DG1_DMC_PATH);
+
+#define RKL_DMC_PATH DMC_PATH(rkl, 2, 02)
+#define RKL_DMC_VERSION_REQUIRED DMC_VERSION(2, 2)
+MODULE_FIRMWARE(RKL_DMC_PATH);
+
+#define TGL_DMC_PATH DMC_PATH(tgl, 2, 08)
+#define TGL_DMC_VERSION_REQUIRED DMC_VERSION(2, 8)
+MODULE_FIRMWARE(TGL_DMC_PATH);
+
+#define ICL_DMC_PATH DMC_PATH(icl, 1, 09)
+#define ICL_DMC_VERSION_REQUIRED DMC_VERSION(1, 9)
+#define ICL_DMC_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(ICL_DMC_PATH);
+
+#define CNL_DMC_PATH DMC_PATH(cnl, 1, 07)
+#define CNL_DMC_VERSION_REQUIRED DMC_VERSION(1, 7)
+#define CNL_DMC_MAX_FW_SIZE GLK_DMC_MAX_FW_SIZE
+MODULE_FIRMWARE(CNL_DMC_PATH);
+
+#define GLK_DMC_PATH DMC_PATH(glk, 1, 04)
+#define GLK_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define GLK_DMC_MAX_FW_SIZE 0x4000
+MODULE_FIRMWARE(GLK_DMC_PATH);
+
+#define KBL_DMC_PATH DMC_PATH(kbl, 1, 04)
+#define KBL_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define KBL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_DMC_PATH);
+
+#define SKL_DMC_PATH DMC_PATH(skl, 1, 27)
+#define SKL_DMC_VERSION_REQUIRED DMC_VERSION(1, 27)
+#define SKL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_DMC_PATH);
+
+#define BXT_DMC_PATH DMC_PATH(bxt, 1, 07)
+#define BXT_DMC_VERSION_REQUIRED DMC_VERSION(1, 7)
+#define BXT_DMC_MAX_FW_SIZE 0x3000
+MODULE_FIRMWARE(BXT_DMC_PATH);
+
+#define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES 20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
#define DMC_V1_MAX_MMIO_COUNT 8
@@ -232,6 +237,11 @@ struct stepping_info {
char substepping;
};
+bool intel_dmc_has_payload(struct drm_i915_private *i915)
+{
+ return i915->dmc.dmc_payload;
+}
+
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -284,7 +294,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
@@ -297,47 +307,47 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
}
/**
- * intel_csr_load_program() - write the firmware from memory to register.
+ * intel_dmc_load_program() - write the firmware from memory to register.
* @dev_priv: i915 drm device.
*
- * CSR firmware is read from a .bin file and kept in internal memory one time.
+ * DMC firmware is read from a .bin file and kept in internal memory one time.
 * Every time the display comes back from a low-power state this function is called to
* copy the firmware from internal memory to registers.
*/
-void intel_csr_load_program(struct drm_i915_private *dev_priv)
+void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
- u32 *payload = dev_priv->csr.dmc_payload;
+ u32 *payload = dev_priv->dmc.dmc_payload;
u32 i, fw_size;
- if (!HAS_CSR(dev_priv)) {
+ if (!HAS_DMC(dev_priv)) {
drm_err(&dev_priv->drm,
- "No CSR support available for this platform\n");
+ "No DMC support available for this platform\n");
return;
}
- if (!dev_priv->csr.dmc_payload) {
+ if (!intel_dmc_has_payload(dev_priv)) {
drm_err(&dev_priv->drm,
"Tried to program CSR with empty payload\n");
return;
}
- fw_size = dev_priv->csr.dmc_fw_size;
+ fw_size = dev_priv->dmc.dmc_fw_size;
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
preempt_disable();
for (i = 0; i < fw_size; i++)
- intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i),
+ intel_uncore_write_fw(&dev_priv->uncore, DMC_PROGRAM(i),
payload[i]);
preempt_enable();
- for (i = 0; i < dev_priv->csr.mmio_count; i++) {
- intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i],
- dev_priv->csr.mmiodata[i]);
+ for (i = 0; i < dev_priv->dmc.mmio_count; i++) {
+ intel_de_write(dev_priv, dev_priv->dmc.mmioaddr[i],
+ dev_priv->dmc.mmiodata[i]);
}
- dev_priv->csr.dc_state = 0;
+ dev_priv->dmc.dc_state = 0;
gen9_set_dc_state_debugmask(dev_priv);
}
@@ -351,7 +361,7 @@ static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
const struct stepping_info *si,
u8 package_ver)
{
- u32 dmc_offset = CSR_DEFAULT_FW_OFFSET;
+ u32 dmc_offset = DMC_DEFAULT_FW_OFFSET;
unsigned int i;
for (i = 0; i < num_entries; i++) {
@@ -386,17 +396,18 @@ static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
return dmc_offset;
}
-static u32 parse_csr_fw_dmc(struct intel_csr *csr,
- const struct intel_dmc_header_base *dmc_header,
- size_t rem_size)
+static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
+ const struct intel_dmc_header_base *dmc_header,
+ size_t rem_size)
{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
u32 mmio_count, mmio_count_max;
u8 *payload;
- BUILD_BUG_ON(ARRAY_SIZE(csr->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
- ARRAY_SIZE(csr->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(dmc->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
+ ARRAY_SIZE(dmc->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
/*
 * Check if we can access common fields, we will check again below
@@ -434,34 +445,34 @@ static u32 parse_csr_fw_dmc(struct intel_csr *csr,
header_len_bytes = dmc_header->header_len;
dmc_header_size = sizeof(*v1);
} else {
- DRM_ERROR("Unknown DMC fw header version: %u\n",
- dmc_header->header_ver);
+ drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
+ dmc_header->header_ver);
return 0;
}
if (header_len_bytes != dmc_header_size) {
- DRM_ERROR("DMC firmware has wrong dmc header length "
- "(%u bytes)\n", header_len_bytes);
+ drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
+ "(%u bytes)\n", header_len_bytes);
return 0;
}
/* Cache the dmc header info. */
if (mmio_count > mmio_count_max) {
- DRM_ERROR("DMC firmware has wrong mmio count %u\n", mmio_count);
+ drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
return 0;
}
for (i = 0; i < mmio_count; i++) {
- if (mmioaddr[i] < CSR_MMIO_START_RANGE ||
- mmioaddr[i] > CSR_MMIO_END_RANGE) {
- DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
- mmioaddr[i]);
+ if (mmioaddr[i] < DMC_MMIO_START_RANGE ||
+ mmioaddr[i] > DMC_MMIO_END_RANGE) {
+ drm_err(&i915->drm, "DMC firmware has wrong mmio address 0x%x\n",
+ mmioaddr[i]);
return 0;
}
- csr->mmioaddr[i] = _MMIO(mmioaddr[i]);
- csr->mmiodata[i] = mmiodata[i];
+ dmc->mmioaddr[i] = _MMIO(mmioaddr[i]);
+ dmc->mmiodata[i] = mmiodata[i];
}
- csr->mmio_count = mmio_count;
+ dmc->mmio_count = mmio_count;
rem_size -= header_len_bytes;
@@ -470,34 +481,33 @@ static u32 parse_csr_fw_dmc(struct intel_csr *csr,
if (rem_size < payload_size)
goto error_truncated;
- if (payload_size > csr->max_fw_size) {
- DRM_ERROR("DMC FW too big (%u bytes)\n", payload_size);
+ if (payload_size > dmc->max_fw_size) {
+ drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
return 0;
}
- csr->dmc_fw_size = dmc_header->fw_size;
+ dmc->dmc_fw_size = dmc_header->fw_size;
- csr->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
- if (!csr->dmc_payload) {
- DRM_ERROR("Memory allocation failed for dmc payload\n");
+ dmc->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
+ if (!dmc->dmc_payload)
return 0;
- }
payload = (u8 *)(dmc_header) + header_len_bytes;
- memcpy(csr->dmc_payload, payload, payload_size);
+ memcpy(dmc->dmc_payload, payload, payload_size);
return header_len_bytes + payload_size;
error_truncated:
- DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
static u32
-parse_csr_fw_package(struct intel_csr *csr,
+parse_dmc_fw_package(struct intel_dmc *dmc,
const struct intel_package_header *package_header,
const struct stepping_info *si,
size_t rem_size)
{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries, dmc_offset;
const struct intel_fw_info *fw_info;
@@ -510,8 +520,8 @@ parse_csr_fw_package(struct intel_csr *csr,
} else if (package_header->header_ver == 2) {
max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
} else {
- DRM_ERROR("DMC firmware has unknown header version %u\n",
- package_header->header_ver);
+ drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
+ package_header->header_ver);
return 0;
}
@@ -524,8 +534,8 @@ parse_csr_fw_package(struct intel_csr *csr,
goto error_truncated;
if (package_header->header_len * 4 != package_size) {
- DRM_ERROR("DMC firmware has wrong package header length "
- "(%u bytes)\n", package_size);
+ drm_err(&i915->drm, "DMC firmware has wrong package header length "
+ "(%u bytes)\n", package_size);
return 0;
}
@@ -537,9 +547,9 @@ parse_csr_fw_package(struct intel_csr *csr,
((u8 *)package_header + sizeof(*package_header));
dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
package_header->header_ver);
- if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
- DRM_ERROR("DMC firmware not supported for %c stepping\n",
- si->stepping);
+ if (dmc_offset == DMC_DEFAULT_FW_OFFSET) {
+ drm_err(&i915->drm, "DMC firmware not supported for %c stepping\n",
+ si->stepping);
return 0;
}
@@ -547,51 +557,53 @@ parse_csr_fw_package(struct intel_csr *csr,
return package_size + dmc_offset * 4;
error_truncated:
- DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
/* Return number of bytes parsed or 0 on error */
-static u32 parse_csr_fw_css(struct intel_csr *csr,
+static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+
if (rem_size < sizeof(struct intel_css_header)) {
- DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
if (sizeof(struct intel_css_header) !=
(css_header->header_len * 4)) {
- DRM_ERROR("DMC firmware has wrong CSS header length "
- "(%u bytes)\n",
- (css_header->header_len * 4));
+ drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
+ "(%u bytes)\n",
+ (css_header->header_len * 4));
return 0;
}
- if (csr->required_version &&
- css_header->version != csr->required_version) {
- DRM_INFO("Refusing to load DMC firmware v%u.%u,"
+ if (dmc->required_version &&
+ css_header->version != dmc->required_version) {
+ drm_info(&i915->drm, "Refusing to load DMC firmware v%u.%u,"
" please use v%u.%u\n",
- CSR_VERSION_MAJOR(css_header->version),
- CSR_VERSION_MINOR(css_header->version),
- CSR_VERSION_MAJOR(csr->required_version),
- CSR_VERSION_MINOR(csr->required_version));
+ DMC_VERSION_MAJOR(css_header->version),
+ DMC_VERSION_MINOR(css_header->version),
+ DMC_VERSION_MAJOR(dmc->required_version),
+ DMC_VERSION_MINOR(dmc->required_version));
return 0;
}
- csr->version = css_header->version;
+ dmc->version = css_header->version;
return sizeof(struct intel_css_header);
}
-static void parse_csr_fw(struct drm_i915_private *dev_priv,
+static void parse_dmc_fw(struct drm_i915_private *dev_priv,
const struct firmware *fw)
{
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_csr *csr = &dev_priv->csr;
+ struct intel_dmc *dmc = &dev_priv->dmc;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
u32 readcount = 0;
u32 r;
@@ -601,7 +613,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract CSS Header information */
css_header = (struct intel_css_header *)fw->data;
- r = parse_csr_fw_css(csr, css_header, fw->size);
+ r = parse_dmc_fw_css(dmc, css_header, fw->size);
if (!r)
return;
@@ -609,7 +621,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract Package Header information */
package_header = (struct intel_package_header *)&fw->data[readcount];
- r = parse_csr_fw_package(csr, package_header, si, fw->size - readcount);
+ r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
if (!r)
return;
@@ -617,49 +629,49 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract dmc_header information */
dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
- parse_csr_fw_dmc(csr, dmc_header, fw->size - readcount);
+ parse_dmc_fw_header(dmc, dmc_header, fw->size - readcount);
}
-static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
- dev_priv->csr.wakeref =
+ drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
+ dev_priv->dmc.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
-static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->csr.wakeref);
+ fetch_and_zero(&dev_priv->dmc.wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
-static void csr_load_work_fn(struct work_struct *work)
+static void dmc_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
- struct intel_csr *csr;
+ struct intel_dmc *dmc;
const struct firmware *fw = NULL;
- dev_priv = container_of(work, typeof(*dev_priv), csr.work);
- csr = &dev_priv->csr;
+ dev_priv = container_of(work, typeof(*dev_priv), dmc.work);
+ dmc = &dev_priv->dmc;
- request_firmware(&fw, dev_priv->csr.fw_path, dev_priv->drm.dev);
- parse_csr_fw(dev_priv, fw);
+ request_firmware(&fw, dev_priv->dmc.fw_path, dev_priv->drm.dev);
+ parse_dmc_fw(dev_priv, fw);
- if (dev_priv->csr.dmc_payload) {
- intel_csr_load_program(dev_priv);
- intel_csr_runtime_pm_put(dev_priv);
+ if (intel_dmc_has_payload(dev_priv)) {
+ intel_dmc_load_program(dev_priv);
+ intel_dmc_runtime_pm_put(dev_priv);
drm_info(&dev_priv->drm,
"Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
+ dev_priv->dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
} else {
drm_notice(&dev_priv->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
- csr->fw_path);
+ dmc->fw_path);
drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -668,152 +680,152 @@ static void csr_load_work_fn(struct work_struct *work)
}
/**
- * intel_csr_ucode_init() - initialize the firmware loading.
+ * intel_dmc_ucode_init() - initialize the firmware loading.
* @dev_priv: i915 drm device.
*
 * This function is called at display driver load time to read the
 * firmware from a .bin file and copy it into internal memory.
*/
-void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
+void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
{
- struct intel_csr *csr = &dev_priv->csr;
+ struct intel_dmc *dmc = &dev_priv->dmc;
- INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);
+ INIT_WORK(&dev_priv->dmc.work, dmc_load_work_fn);
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return;
/*
- * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
+ * Obtain a runtime pm reference, until DMC is loaded, to avoid entering
* runtime-suspend.
*
* On error, we return with the rpm wakeref held to prevent runtime
- * suspend as runtime suspend *requires* a working CSR for whatever
+ * suspend as runtime suspend *requires* a working DMC for whatever
* reason.
*/
- intel_csr_runtime_pm_get(dev_priv);
+ intel_dmc_runtime_pm_get(dev_priv);
if (IS_ALDERLAKE_S(dev_priv)) {
- csr->fw_path = ADLS_CSR_PATH;
- csr->required_version = ADLS_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ dmc->fw_path = ADLS_DMC_PATH;
+ dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
} else if (IS_DG1(dev_priv)) {
- csr->fw_path = DG1_CSR_PATH;
- csr->required_version = DG1_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ dmc->fw_path = DG1_DMC_PATH;
+ dmc->required_version = DG1_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
} else if (IS_ROCKETLAKE(dev_priv)) {
- csr->fw_path = RKL_CSR_PATH;
- csr->required_version = RKL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ dmc->fw_path = RKL_DMC_PATH;
+ dmc->required_version = RKL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- csr->fw_path = TGL_CSR_PATH;
- csr->required_version = TGL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
- } else if (IS_DISPLAY_VER(dev_priv, 11)) {
- csr->fw_path = ICL_CSR_PATH;
- csr->required_version = ICL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
+ dmc->fw_path = TGL_DMC_PATH;
+ dmc->required_version = TGL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VER(dev_priv) == 11) {
+ dmc->fw_path = ICL_DMC_PATH;
+ dmc->required_version = ICL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
} else if (IS_CANNONLAKE(dev_priv)) {
- csr->fw_path = CNL_CSR_PATH;
- csr->required_version = CNL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
+ dmc->fw_path = CNL_DMC_PATH;
+ dmc->required_version = CNL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = CNL_DMC_MAX_FW_SIZE;
} else if (IS_GEMINILAKE(dev_priv)) {
- csr->fw_path = GLK_CSR_PATH;
- csr->required_version = GLK_CSR_VERSION_REQUIRED;
- csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
+ dmc->fw_path = GLK_DMC_PATH;
+ dmc->required_version = GLK_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
} else if (IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv)) {
- csr->fw_path = KBL_CSR_PATH;
- csr->required_version = KBL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
+ dmc->fw_path = KBL_DMC_PATH;
+ dmc->required_version = KBL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
} else if (IS_SKYLAKE(dev_priv)) {
- csr->fw_path = SKL_CSR_PATH;
- csr->required_version = SKL_CSR_VERSION_REQUIRED;
- csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
+ dmc->fw_path = SKL_DMC_PATH;
+ dmc->required_version = SKL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
} else if (IS_BROXTON(dev_priv)) {
- csr->fw_path = BXT_CSR_PATH;
- csr->required_version = BXT_CSR_VERSION_REQUIRED;
- csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
+ dmc->fw_path = BXT_DMC_PATH;
+ dmc->required_version = BXT_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
if (dev_priv->params.dmc_firmware_path) {
if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
- csr->fw_path = NULL;
+ dmc->fw_path = NULL;
drm_info(&dev_priv->drm,
- "Disabling CSR firmware and runtime PM\n");
+ "Disabling DMC firmware and runtime PM\n");
return;
}
- csr->fw_path = dev_priv->params.dmc_firmware_path;
+ dmc->fw_path = dev_priv->params.dmc_firmware_path;
/* Bypass version check for firmware override. */
- csr->required_version = 0;
+ dmc->required_version = 0;
}
- if (csr->fw_path == NULL) {
+ if (!dmc->fw_path) {
drm_dbg_kms(&dev_priv->drm,
- "No known CSR firmware for platform, disabling runtime PM\n");
+ "No known DMC firmware for platform, disabling runtime PM\n");
return;
}
- drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path);
- schedule_work(&dev_priv->csr.work);
+ drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
+ schedule_work(&dev_priv->dmc.work);
}
/**
- * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
+ * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
* @dev_priv: i915 drm device
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
-void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
+void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
{
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return;
- flush_work(&dev_priv->csr.work);
+ flush_work(&dev_priv->dmc.work);
/* Drop the reference held in case DMC isn't loaded. */
- if (!dev_priv->csr.dmc_payload)
- intel_csr_runtime_pm_put(dev_priv);
+ if (!intel_dmc_has_payload(dev_priv))
+ intel_dmc_runtime_pm_put(dev_priv);
}
/**
- * intel_csr_ucode_resume() - init CSR firmware during system resume
+ * intel_dmc_ucode_resume() - init DMC firmware during system resume
* @dev_priv: i915 drm device
*
* Reinitialize the DMC firmware during system resume, reacquiring any
- * resources released in intel_csr_ucode_suspend().
+ * resources released in intel_dmc_ucode_suspend().
*/
-void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
+void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
{
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
- if (!dev_priv->csr.dmc_payload)
- intel_csr_runtime_pm_get(dev_priv);
+ if (!intel_dmc_has_payload(dev_priv))
+ intel_dmc_runtime_pm_get(dev_priv);
}
/**
- * intel_csr_ucode_fini() - unload the CSR firmware.
+ * intel_dmc_ucode_fini() - unload the DMC firmware.
* @dev_priv: i915 drm device.
*
 * Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
-void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
+void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
{
- if (!HAS_CSR(dev_priv))
+ if (!HAS_DMC(dev_priv))
return;
- intel_csr_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
+ intel_dmc_ucode_suspend(dev_priv);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
- kfree(dev_priv->csr.dmc_payload);
+ kfree(dev_priv->dmc.dmc_payload);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
new file mode 100644
index 000000000000..4c22f567b61b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DMC_H__
+#define __INTEL_DMC_H__
+
+#include "i915_reg.h"
+#include "intel_wakeref.h"
+#include <linux/workqueue.h>
+
+struct drm_i915_private;
+
+#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
+#define DMC_VERSION_MAJOR(version) ((version) >> 16)
+#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
+
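DMC_VERSION() packs the major number into the high 16 bits and the minor into the low 16; the two accessors invert that. A quick userspace self-check, using the 2.8 version TGL requires per the defines earlier in this patch:

	#include <assert.h>

	#define DMC_VERSION(major, minor)	((major) << 16 | (minor))
	#define DMC_VERSION_MAJOR(version)	((version) >> 16)
	#define DMC_VERSION_MINOR(version)	((version) & 0xffff)

	int main(void)
	{
		unsigned int v = DMC_VERSION(2, 8);	/* TGL's required version */

		assert(v == 0x20008);
		assert(DMC_VERSION_MAJOR(v) == 2);
		assert(DMC_VERSION_MINOR(v) == 8);
		return 0;
	}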
+struct intel_dmc {
+ struct work_struct work;
+ const char *fw_path;
+ u32 required_version;
+ u32 max_fw_size; /* bytes */
+ u32 *dmc_payload;
+ u32 dmc_fw_size; /* dwords */
+ u32 version;
+ u32 mmio_count;
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
+ u32 dc_state;
+ u32 target_dc_state;
+ u32 allowed_dc_mask;
+ intel_wakeref_t wakeref;
+};
+
+void intel_dmc_ucode_init(struct drm_i915_private *i915);
+void intel_dmc_load_program(struct drm_i915_private *i915);
+void intel_dmc_ucode_fini(struct drm_i915_private *i915);
+void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
+void intel_dmc_ucode_resume(struct drm_i915_private *i915);
+bool intel_dmc_has_payload(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DMC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 642c60f3d9b1..5c9222283044 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -46,13 +46,15 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
-#include "intel_dpll.h"
#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -107,6 +109,7 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
}
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
@@ -215,7 +218,7 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
return DISPLAY_VER(dev_priv) >= 12 ||
- (IS_DISPLAY_VER(dev_priv, 11) &&
+ (DISPLAY_VER(dev_priv) == 11 &&
encoder->port != PORT_A);
}
@@ -295,16 +298,16 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- if (IS_DISPLAY_VER(dev_priv, 10))
+ if (DISPLAY_VER(dev_priv) == 10)
max_rate = cnl_max_source_rate(intel_dp);
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
- } else if (IS_GEN9_LP(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
- } else if (IS_GEN9_BC(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) == 9) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
@@ -492,7 +495,8 @@ small_joiner_ram_size_bits(struct drm_i915_private *i915)
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
- bool bigjoiner)
+ bool bigjoiner,
+ u32 pipe_bpp)
{
u32 bits_per_pixel, max_bpp_small_joiner_ram;
int i;
@@ -539,12 +543,17 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
return 0;
}
- /* Find the nearest match in the array of known BPPs from VESA */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
- if (bits_per_pixel < valid_dsc_bpp[i + 1])
- break;
+ /* From XE_LPD onwards we support from bpc up to uncompressed bpp-1 BPPs */
+ if (DISPLAY_VER(i915) >= 13) {
+ bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
+ } else {
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ bits_per_pixel = valid_dsc_bpp[i];
}
- bits_per_pixel = valid_dsc_bpp[i];
/*
* Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
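On pre-XE_LPD platforms the legacy loop above snaps the computed value down to the nearest entry of valid_dsc_bpp[] (the VESA-derived table declared earlier in this function; at the time of this patch it is assumed to read {6, 8, 10, 12, 15}), while XE_LPD accepts any value up to pipe_bpp - 1. A worked userspace example of the snap-down, under that table assumption:

	#include <stdio.h>

	static const unsigned char valid_dsc_bpp[] = { 6, 8, 10, 12, 15 };

	/* Same loop shape as the legacy branch: stop before the first entry
	 * that exceeds the computed bpp, then use the entry stopped at.
	 * A computed 11 bpp lands between 10 and 12 and snaps to 10. */
	static unsigned int snap_down(unsigned int bpp)
	{
		unsigned int i;

		for (i = 0; i < sizeof(valid_dsc_bpp) - 1; i++)
			if (bpp < valid_dsc_bpp[i + 1])
				break;

		return valid_dsc_bpp[i];
	}

	int main(void)
	{
		printf("%u\n", snap_down(11));	/* prints 10 */
		return 0;
	}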
@@ -778,6 +787,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
*/
if (DISPLAY_VER(dev_priv) >= 10 &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+ /*
+ * TBD: pass the connector BPC;
+ * for now use U8_MAX so that the max BPC on that platform is picked
+ */
+ int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);
+
if (intel_dp_is_edp(intel_dp)) {
dsc_max_output_bpp =
drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
@@ -791,7 +806,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_lanes,
target_clock,
mode->hdisplay,
- bigjoiner) >> 4;
+ bigjoiner,
+ pipe_bpp) >> 4;
dsc_slice_count =
intel_dp_dsc_get_slice_count(intel_dp,
target_clock,
@@ -802,8 +818,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
dsc = dsc_max_output_bpp && dsc_slice_count;
}
- /* big joiner configuration needs DSC */
- if (bigjoiner && !dsc)
+ /*
+ * Big joiner configuration needs DSC for TGL which is not true for
+ * XE_LPD where uncompressed joiner is supported.
+ */
+ if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
return MODE_CLOCK_HIGH;
if (mode_rate > max_rate && !dsc)
@@ -916,7 +935,7 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12)
return true;
- if (IS_DISPLAY_VER(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
+ if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
return true;
return false;
@@ -1095,10 +1114,18 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i, num_bpc;
u8 dsc_bpc[3] = {0};
+ u8 dsc_max_bpc;
+
+ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
+ if (DISPLAY_VER(i915) >= 12)
+ dsc_max_bpc = min_t(u8, 12, max_req_bpc);
+ else
+ dsc_max_bpc = min_t(u8, 10, max_req_bpc);
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
dsc_bpc);
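As a quick illustration of the clamp just introduced, here is a standalone sketch; dsc_clamp_bpc() is a hypothetical stand-in, not a driver function:

    #include <stdio.h>

    /* Platform cap is 12 on display version 12+ (TGL+), 10 below (ICL). */
    static int dsc_clamp_bpc(int display_ver, int max_req_bpc)
    {
            int cap = display_ver >= 12 ? 12 : 10;

            return max_req_bpc < cap ? max_req_bpc : cap;
    }

    int main(void)
    {
            /* A sink requesting 16 bpc gets 10 on ICL, 12 on TGL. */
            printf("ICL: %d, TGL: %d\n",
                   dsc_clamp_bpc(11, 16), dsc_clamp_bpc(12, 16));
            return 0;
    }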
@@ -1129,10 +1156,6 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
*/
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
- ret = intel_dsc_compute_params(encoder, crtc_state);
- if (ret)
- return ret;
-
/*
* Slice Height of 8 works for all currently available panels. So start
* with that if pic_height is an integral multiple of 8. Eventually add
@@ -1145,6 +1168,10 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
else
vdsc_cfg->slice_height = 2;
+ ret = intel_dsc_compute_params(encoder, crtc_state);
+ if (ret)
+ return ret;
+
vdsc_cfg->dsc_version_major =
(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
@@ -1186,7 +1213,6 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- u8 dsc_max_bpc;
int pipe_bpp;
int ret;
@@ -1196,14 +1222,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
- /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
- if (DISPLAY_VER(dev_priv) >= 12)
- dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
- else
- dsc_max_bpc = min_t(u8, 10,
- conn_state->max_requested_bpc);
-
- pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
/* Min Input BPC for ICL+ is 8 */
if (pipe_bpp < 8 * 3) {
@@ -1238,7 +1257,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
- pipe_config->bigjoiner);
+ pipe_config->bigjoiner,
+ pipe_bpp);
dsc_dp_slice_count =
intel_dp_dsc_get_slice_count(intel_dp,
adjusted_mode->crtc_clock,
@@ -1350,9 +1370,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
*/
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
- /* enable compression if the mode doesn't fit available BW */
+ /*
+ * Pipe joiner needs compression up to display version 12 due to a BW
+ * limitation. From DG2 onwards the pipe joiner can be enabled without
+ * compression.
+ */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
- if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
+ if (ret || intel_dp->force_dsc_en || (DISPLAY_VER(i915) < 13 &&
+ pipe_config->bigjoiner)) {
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits);
if (ret < 0)
@@ -1812,7 +1836,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
if (ret < 0)
drm_dbg_kms(&i915->drm,
"Failed to %s sink decompression state\n",
- enable ? "enable" : "disable");
+ enabledisable(enable));
}
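The enabledisable() conversions in this and the following hunks rely on the string helpers from i915_utils.h; a sketch of their presumed definitions (modeled on the kernel helpers, which may differ in detail):

    #include <stdbool.h>

    static inline const char *enabledisable(bool v)
    {
            return v ? "enable" : "disable";
    }

    static inline const char *enableddisabled(bool v)
    {
            return v ? "enabled" : "disabled";
    }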
static void
@@ -2244,8 +2268,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
- drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
- enableddisabled(intel_dp->has_hdmi_sink));
+ drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
+ enabledisable(intel_dp->has_hdmi_sink));
tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
@@ -2253,8 +2277,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
drm_dbg_kms(&i915->drm,
- "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
- enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
+ "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
+ enabledisable(intel_dp->dfp.ycbcr_444_to_420));
tmp = 0;
if (intel_dp->dfp.rgb_to_ycbcr) {
@@ -2291,8 +2315,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
drm_dbg_kms(&i915->drm,
- "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
- enableddisabled(tmp ? true : false));
+ "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
+ enabledisable(tmp));
}
@@ -2812,31 +2836,25 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
/* TODO: Add DSC case (DIP_ENABLE_PPS) */
/* When PSR is enabled, this routine doesn't disable VSC DIP */
- if (intel_psr_enabled(intel_dp))
- val &= ~dip_enable;
- else
- val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
-
- if (!enable) {
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
- return;
- }
+ if (!crtc_state->has_psr)
+ val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
intel_de_write(dev_priv, reg, val);
intel_de_posting_read(dev_priv, reg);
+ if (!enable)
+ return;
+
/* When PSR is enabled, VSC SDP is handled by PSR routine */
- if (!intel_psr_enabled(intel_dp))
+ if (!crtc_state->has_psr)
intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
@@ -2963,14 +2981,13 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct drm_dp_vsc_sdp *vsc)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_VSC;
struct dp_sdp sdp = {};
int ret;
/* When PSR is enabled, VSC SDP is handled by PSR routine */
- if (intel_psr_enabled(intel_dp))
+ if (crtc_state->has_psr)
return;
if ((crtc_state->infoframes.enable &
@@ -5359,7 +5376,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_add_properties(intel_dp, connector);
if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
- int ret = intel_dp_init_hdcp(dig_port, intel_connector);
+ int ret = intel_dp_hdcp_init(dig_port, intel_connector);
if (ret)
drm_dbg_kms(&dev_priv->drm,
"HDCP init failed, skipping.\n");
@@ -5392,6 +5409,9 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp;
@@ -5412,6 +5432,9 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp;
int ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 8db5062f6c4a..680631b5b437 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -119,9 +119,6 @@ void intel_ddi_update_pipe(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
- struct intel_connector *intel_connector);
-
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_dp_sync_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 7e83bc2cc34a..7c048d2ecf43 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -126,12 +126,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(dig_port->base.base.dev);
- u32 precharge, timeout;
-
- if (IS_SANDYBRIDGE(dev_priv))
- precharge = 3;
- else
- precharge = 5;
+ u32 timeout;
/* Max timeout value on G4x-BDW: 1.6ms */
if (IS_BROADWELL(dev_priv))
@@ -146,7 +141,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
@@ -607,8 +602,8 @@ static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
+ case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
+ case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
return DP_AUX_CH_CTL(aux_ch);
default:
MISSING_CASE(aux_ch);
@@ -630,8 +625,8 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
+ case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
+ case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
return DP_AUX_CH_DATA(aux_ch, index);
default:
MISSING_CASE(aux_ch);
@@ -682,10 +677,15 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
+ intel_dp->aux.drm_dev = &dev_priv->drm;
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
- if (DISPLAY_VER(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
+ if (DISPLAY_VER(dev_priv) >= 13 && aux_ch >= AUX_CH_D_XELPD)
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
+ aux_ch_name(aux_ch - AUX_CH_D_XELPD + AUX_CH_D),
+ encoder->base.name);
+ else if (DISPLAY_VER(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
aux_ch - AUX_CH_USBC1 + '1',
encoder->base.name);
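For illustration: on XE_LPD the enum value AUX_CH_USBC5 aliases AUX_CH_D_XELPD (per the tgl_aux_ctl_reg() hunk above), so that channel is now reported as "AUX D/<encoder name>" rather than "AUX USBC5/<encoder name>".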
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 4f8337c7fd2e..8e9ac9ba1d38 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -291,7 +291,7 @@ static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable)
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
reg_val) != 1) {
drm_dbg_kms(&i915->drm, "Failed to %s aux backlight\n",
- enable ? "enable" : "disable");
+ enabledisable(enable));
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 90868e156c69..d697d169e8c1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -11,9 +11,11 @@
#include <drm/drm_hdcp.h>
#include <drm/drm_print.h>
-#include "intel_display_types.h"
#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_hdcp.h"
#include "intel_hdcp.h"
static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
@@ -532,7 +534,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
- ktime_t msg_end;
+ ktime_t msg_end = ktime_set(0, 0);
bool msg_expired;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
@@ -835,7 +837,7 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.protocol = HDCP_PROTOCOL_DP,
};
-int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
+int intel_dp_hdcp_init(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.h b/drivers/gpu/drm/i915/display/intel_dp_hdcp.h
new file mode 100644
index 000000000000..eff5ec5c5021
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_HDCP___
+#define __INTEL_DP_HDCP___
+
+struct intel_connector;
+struct intel_digital_port;
+
+int intel_dp_hdcp_init(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector);
+
+#endif /* __INTEL_DP_HDCP___ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 02a003fd48fb..08bceae40aa8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -37,7 +37,7 @@ intel_dp_dump_link_status(struct drm_device *drm,
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
- memset(&intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
+ memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
@@ -128,49 +128,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}
-/**
- * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
- * @intel_dp: Intel DP struct
- *
- * Read the LTTPR common and DPRX capabilities and switch to non-transparent
- * link training mode if any is detected and read the PHY capabilities for all
- * detected LTTPRs. In case of an LTTPR detection error or if the number of
- * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
- * transparent mode link training mode.
- *
- * Returns:
- * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
- * DPRX capabilities are read out.
- * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- * detection failure and the transparent LT mode was set. The DPRX
- * capabilities are read out.
- * <0 Reading out the DPRX capabilities failed.
- */
-int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
int lttpr_count;
- bool ret;
int i;
- ret = intel_dp_read_lttpr_common_caps(intel_dp);
-
- /* The DPTX shall read the DPRX caps after LTTPR detection. */
- if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
- intel_dp_reset_lttpr_common_caps(intel_dp);
- return -EIO;
- }
-
- if (!ret)
- return 0;
-
- /*
- * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
- * at least 1.4.
- */
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
- intel_dp_reset_lttpr_common_caps(intel_dp);
+ if (!intel_dp_read_lttpr_common_caps(intel_dp))
return 0;
- }
lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
/*
@@ -211,6 +175,37 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
return lttpr_count;
}
+
+/**
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
+ * @intel_dp: Intel DP struct
+ *
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
+ * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
+ * transparent link training mode.
+ *
+ * Returns:
+ * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
+ * DPRX capabilities are read out.
+ * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
+ * detection failure and the transparent LT mode was set. The DPRX
+ * capabilities are read out.
+ * <0 Reading out the DPRX capabilities failed.
+ */
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+{
+ int lttpr_count = intel_dp_init_lttpr(intel_dp);
+
+ /* The DPTX shall read the DPRX caps after LTTPR detection. */
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return -EIO;
+ }
+
+ return lttpr_count;
+}
EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
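A hypothetical caller sketch, following the kerneldoc contract above (only the init function is real; the surrounding code is illustrative):

    int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

    if (lttpr_count < 0)
            return lttpr_count;     /* DPRX caps could not be read */

    if (lttpr_count == 0) {
            /* Transparent LT mode; DPRX caps are valid. */
    } else {
            /* Non-transparent LT through lttpr_count repeaters. */
    }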
static u8 dp_voltage_max(u8 preemph)
@@ -513,7 +508,7 @@ static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_d
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX)
- drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+ drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd);
else
drm_dp_lttpr_link_train_clock_recovery_delay();
}
@@ -665,11 +660,11 @@ intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX) {
- drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd);
} else {
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
- drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
+ drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 2daa3f67791e..b170e272bdee 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -32,13 +32,16 @@
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
#include "intel_ddi.h"
+#include "intel_de.h"
#include "intel_display_types.h"
-#include "intel_hotplug.h"
#include "intel_dp.h"
+#include "intel_dp_hdcp.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
+#include "intel_hotplug.h"
#include "skl_scaler.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
@@ -70,7 +73,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
crtc_state->pbn,
- drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
+ crtc_state->port_clock,
crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
@@ -154,7 +158,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -832,7 +836,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_broadcast_rgb_property(connector);
if (DISPLAY_VER(dev_priv) <= 12) {
- ret = intel_dp_init_hdcp(dig_port, intel_connector);
+ ret = intel_dp_hdcp_init(dig_port, intel_connector);
if (ret)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
connector->name, connector->base.id);
@@ -941,6 +945,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
struct intel_dp *intel_dp = &dig_port->dp;
enum port port = dig_port->base.port;
int ret;
+ int max_source_rate =
+ intel_dp->source_rates[intel_dp->num_source_rates - 1];
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
return 0;
@@ -956,7 +962,10 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
/* create encoders */
intel_dp_create_fake_mst_encoders(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
- &intel_dp->aux, 16, 3, conn_base_id);
+ &intel_dp->aux, 16, 3,
+ dig_port->max_lanes,
+ max_source_rate,
+ conn_base_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 514c4a7adffc..48507ed79950 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -23,6 +23,7 @@
#include "display/intel_dp.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_sideband.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 3e3c5eed1600..89635da9f6f6 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
#include "intel_crtc.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_display.h"
#include "intel_dpll.h"
@@ -366,13 +367,11 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
return false;
- if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
+ if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv))
if (clock->m1 <= clock->m2)
return false;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)) {
+ if (!IS_LP(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -1358,7 +1357,7 @@ intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
else if (IS_PINEVIEW(dev_priv))
dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
- else if (!IS_DISPLAY_VER(dev_priv, 2))
+ else if (DISPLAY_VER(dev_priv) != 2)
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
else
dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 7ff4b0d29ed1..88247027fd5a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -6,6 +6,8 @@
#ifndef _INTEL_DPLL_H_
#define _INTEL_DPLL_H_
+#include <linux/types.h>
+
struct dpll;
struct drm_i915_private;
struct intel_crtc;
@@ -37,5 +39,8 @@ void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 1ae158d12c07..71ac57670043 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -21,8 +21,10 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
/**
@@ -147,6 +149,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
pll->info->name, onoff(state), onoff(cur_state));
}
+static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
+{
+ return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
+}
+
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
+{
+ return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
+}
+
static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private *i915,
struct intel_shared_dpll *pll)
@@ -159,6 +171,19 @@ intel_combo_pll_enable_reg(struct drm_i915_private *i915,
return CNL_DPLL_ENABLE(pll->info->id);
}
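+
+/*
+ * ADL_P has dedicated PORTTC PLL enable registers for the Type-C
+ * PLLs; earlier TC-capable platforms (ICL/TGL) use MG_PLL_ENABLE.
+ */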
+static i915_reg_t
+intel_tc_pll_enable_reg(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+
+ if (IS_ALDERLAKE_P(i915))
+ return ADLP_PORTTC_PLL_ENABLE(tc_port);
+
+ return MG_PLL_ENABLE(tc_port);
+}
+
/**
* intel_prepare_shared_dpll - call a dpll's prepare hook
* @crtc_state: CRTC, and its state, which has a shared dpll
@@ -3118,16 +3143,6 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
-static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
-{
- return id - DPLL_ID_ICL_MGPLL1;
-}
-
-enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
-{
- return tc_port + DPLL_ID_ICL_MGPLL1;
-}
-
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
struct intel_dpll_hw_state *state,
@@ -3726,12 +3741,14 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
bool ret = false;
u32 val;
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
+
wakeref = intel_display_power_get_if_enabled(dev_priv,
POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
- val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, enable_reg);
if (!(val & PLL_ENABLE))
goto out;
@@ -3795,7 +3812,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
if (!(val & PLL_ENABLE))
goto out;
@@ -4167,8 +4184,7 @@ static void tbt_pll_enable(struct drm_i915_private *dev_priv,
static void mg_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- i915_reg_t enable_reg =
- MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
icl_pll_power_enable(dev_priv, pll, enable_reg);
@@ -4247,8 +4263,7 @@ static void tbt_pll_disable(struct drm_i915_private *dev_priv,
static void mg_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- i915_reg_t enable_reg =
- MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
icl_pll_disable(dev_priv, pll, enable_reg);
}
@@ -4414,6 +4429,26 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct dpll_info adlp_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr adlp_pll_mgr = {
+ .dpll_info = adlp_plls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -4427,7 +4462,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_ALDERLAKE_S(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv))
+ dpll_mgr = &adlp_pll_mgr;
+ else if (IS_ALDERLAKE_S(dev_priv))
dpll_mgr = &adls_pll_mgr;
else if (IS_DG1(dev_priv))
dpll_mgr = &dg1_pll_mgr;
@@ -4441,10 +4478,10 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_mgr = &icl_pll_mgr;
else if (IS_CANNONLAKE(dev_priv))
dpll_mgr = &cnl_pll_mgr;
- else if (IS_GEN9_BC(dev_priv))
- dpll_mgr = &skl_pll_mgr;
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
dpll_mgr = &bxt_pll_mgr;
+ else if (DISPLAY_VER(dev_priv) == 9)
+ dpll_mgr = &skl_pll_mgr;
else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 857126822a88..62a8a69f9f5d 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -5,6 +5,7 @@
*/
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#define DSB_BUF_SIZE (2 * PAGE_SIZE)
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 625f2f1ae061..50d6da0b2419 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -124,6 +124,7 @@ struct intel_dsi {
u16 panel_on_delay;
u16 panel_off_delay;
u16 panel_pwr_cycle_delay;
+ ktime_t panel_power_off_time;
};
struct intel_dsi_host {
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 090cd76266c6..77419f8c05e9 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index fca41ac5b8e1..c60a81a81c09 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -30,14 +30,6 @@ bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
plane == 2;
}
-bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
-{
- if (is_ccs_modifier(fb->modifier))
- return is_ccs_plane(fb, plane);
-
- return plane == 1;
-}
-
bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
{
return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
@@ -84,7 +76,7 @@ int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
unsigned int intel_tile_size(const struct drm_i915_private *i915)
{
- return IS_DISPLAY_VER(i915, 2) ? 2048 : 4096;
+ return DISPLAY_VER(i915) == 2 ? 2048 : 4096;
}
unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
@@ -171,17 +163,17 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
*vsub = 32;
}
-static void intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
+static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h)
{
- int main_plane = is_ccs_plane(fb, color_plane) ?
- skl_ccs_to_main_plane(fb, color_plane) : 0;
+ int main_plane = is_ccs_plane(&fb->base, color_plane) ?
+ skl_ccs_to_main_plane(&fb->base, color_plane) : 0;
int main_hsub, main_vsub;
int hsub, vsub;
- intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
- intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
- *w = fb->width / main_hsub / hsub;
- *h = fb->height / main_vsub / vsub;
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane);
+ *w = fb->base.width / main_hsub / hsub;
+ *h = fb->base.height / main_vsub / vsub;
}
static u32 intel_adjust_tile_offset(int *x, int *y,
@@ -363,8 +355,17 @@ static int intel_fb_offset_to_xy(int *x, int *y,
unsigned int height;
u32 alignment;
- if (DISPLAY_VER(i915) >= 12 &&
- is_semiplanar_uv_plane(fb, color_plane))
+ /*
+ * All DPT color planes must be 512*4k aligned (the amount mapped by a
+ * single DPT page). For ADL_P CCS FBs this only works by requiring
+ * the allocated offsets to be 2MB aligned. Once support for remapping
+ * such FBs is added we can remove this requirement, as then all the
+ * planes can be remapped to an aligned offset.
+ */
+ if (IS_ALDERLAKE_P(i915) && is_ccs_modifier(fb->modifier))
+ alignment = 512 * 4096;
+ else if (DISPLAY_VER(i915) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
alignment = intel_tile_row_size(fb, color_plane);
else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
alignment = intel_tile_size(i915);
@@ -486,9 +487,12 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
return true;
}
-static bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
+bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
{
- return false;
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+
+ return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR &&
+ !is_ccs_modifier(fb->base.modifier);
}
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
@@ -609,7 +613,11 @@ plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
unsigned int pitch_tiles)
{
if (intel_fb_needs_pot_stride_remap(fb))
- return roundup_pow_of_two(pitch_tiles);
+ /*
+ * ADL_P, the only platform needing a POT stride, has a minimum
+ * stride of 8 tiles.
+ */
+ return roundup_pow_of_two(max(pitch_tiles, 8u));
else
return pitch_tiles;
}
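A standalone check of that rounding; pot() is a hypothetical stand-in for the kernel's roundup_pow_of_two():

    #include <assert.h>

    /* Hypothetical stand-in for roundup_pow_of_two() on small values. */
    static unsigned int pot(unsigned int v)
    {
            unsigned int r = 1;

            while (r < v)
                    r <<= 1;
            return r;
    }

    static unsigned int dst_stride_tiles(unsigned int pitch_tiles)
    {
            return pot(pitch_tiles > 8 ? pitch_tiles : 8);
    }

    int main(void)
    {
            assert(dst_stride_tiles(5) == 8);   /* below the 8-tile floor */
            assert(dst_stride_tiles(9) == 16);  /* rounded up to the next POT */
            return 0;
    }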
@@ -743,19 +751,34 @@ static void intel_fb_view_init(struct intel_fb_view *view, enum i915_ggtt_view_t
view->gtt.type = view_type;
}
-int intel_fill_fb_info(struct drm_i915_private *i915, struct drm_framebuffer *fb)
+bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb)
{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ if (DISPLAY_VER(to_i915(fb->base.dev)) >= 13)
+ return false;
+
+ return fb->base.modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->base.modifier == I915_FORMAT_MOD_Yf_TILED;
+}
+
+int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base);
u32 gtt_offset_rotated = 0;
u32 gtt_offset_remapped = 0;
unsigned int max_size = 0;
- int i, num_planes = fb->format->num_planes;
+ int i, num_planes = fb->base.format->num_planes;
unsigned int tile_size = intel_tile_size(i915);
- intel_fb_view_init(&intel_fb->normal_view, I915_GGTT_VIEW_NORMAL);
- intel_fb_view_init(&intel_fb->rotated_view, I915_GGTT_VIEW_ROTATED);
- intel_fb_view_init(&intel_fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
+ intel_fb_view_init(&fb->normal_view, I915_GGTT_VIEW_NORMAL);
+
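+ /*
+ * The two predicates are mutually exclusive by construction: POT
+ * stride remap is ADL_P-only (display version 13), while 90/270
+ * rotation is limited to display version < 13, so the WARN below
+ * encodes an invariant rather than a reachable state.
+ */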
+ drm_WARN_ON(&i915->drm,
+ intel_fb_supports_90_270_rotation(fb) &&
+ intel_fb_needs_pot_stride_remap(fb));
+
+ if (intel_fb_supports_90_270_rotation(fb))
+ intel_fb_view_init(&fb->rotated_view, I915_GGTT_VIEW_ROTATED);
+ if (intel_fb_needs_pot_stride_remap(fb))
+ intel_fb_view_init(&fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
for (i = 0; i < num_planes; i++) {
struct fb_plane_view_dims view_dims;
@@ -770,45 +793,43 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct drm_framebuffer *fb
* is consumed by the driver and not passed to DE. Skip the
* arithmetic related to alignment and offset calculation.
*/
- if (is_gen12_ccs_cc_plane(fb, i)) {
- if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
+ if (is_gen12_ccs_cc_plane(&fb->base, i)) {
+ if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
continue;
else
return -EINVAL;
}
- cpp = fb->format->cpp[i];
- intel_fb_plane_dims(&width, &height, fb, i);
+ cpp = fb->base.format->cpp[i];
+ intel_fb_plane_dims(fb, i, &width, &height);
- ret = convert_plane_offset_to_xy(intel_fb, i, width, &x, &y);
+ ret = convert_plane_offset_to_xy(fb, i, width, &x, &y);
if (ret)
return ret;
- init_plane_view_dims(intel_fb, i, width, height, &view_dims);
+ init_plane_view_dims(fb, i, width, height, &view_dims);
/*
* First pixel of the framebuffer from
* the start of the normal gtt mapping.
*/
- intel_fb->normal_view.color_plane[i].x = x;
- intel_fb->normal_view.color_plane[i].y = y;
- intel_fb->normal_view.color_plane[i].stride = intel_fb->base.pitches[i];
+ fb->normal_view.color_plane[i].x = x;
+ fb->normal_view.color_plane[i].y = y;
+ fb->normal_view.color_plane[i].stride = fb->base.pitches[i];
- offset = calc_plane_aligned_offset(intel_fb, i, &x, &y);
+ offset = calc_plane_aligned_offset(fb, i, &x, &y);
- /* Y or Yf modifiers required for 90/270 rotation */
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED)
- gtt_offset_rotated += calc_plane_remap_info(intel_fb, i, &view_dims,
+ if (intel_fb_supports_90_270_rotation(fb))
+ gtt_offset_rotated += calc_plane_remap_info(fb, i, &view_dims,
offset, gtt_offset_rotated, x, y,
- &intel_fb->rotated_view);
+ &fb->rotated_view);
- if (intel_fb_needs_pot_stride_remap(intel_fb))
- gtt_offset_remapped += calc_plane_remap_info(intel_fb, i, &view_dims,
+ if (intel_fb_needs_pot_stride_remap(fb))
+ gtt_offset_remapped += calc_plane_remap_info(fb, i, &view_dims,
offset, gtt_offset_remapped, x, y,
- &intel_fb->remapped_view);
+ &fb->remapped_view);
- size = calc_plane_normal_size(intel_fb, i, &view_dims, x, y);
+ size = calc_plane_normal_size(fb, i, &view_dims, x, y);
/* how many tiles in total needed in the bo */
max_size = max(max_size, offset + size);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 6acf792a8c44..739d1b91754b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -19,7 +19,6 @@ struct intel_plane_state;
bool is_ccs_plane(const struct drm_framebuffer *fb, int plane);
bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane);
bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane);
-bool is_aux_plane(const struct drm_framebuffer *fb, int plane);
bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane);
bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane);
@@ -46,7 +45,10 @@ u32 intel_plane_compute_aligned_offset(int *x, int *y,
const struct intel_plane_state *state,
int color_plane);
-int intel_fill_fb_info(struct drm_i915_private *i915, struct drm_framebuffer *fb);
+bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb);
+bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb);
+
+int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb);
void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotation,
struct intel_fb_view *view);
int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 957252b695d7..1847a161cb37 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -43,6 +43,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -67,7 +68,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (IS_DISPLAY_VER(dev_priv, 7))
+ if (DISPLAY_VER(dev_priv) == 7)
lines = min(lines, 2048);
else if (DISPLAY_VER(dev_priv) >= 8)
lines = min(lines, 2560);
@@ -109,7 +110,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
cfb_pitch = params->fb.stride;
/* FBC_CTL wants 32B or 64B units */
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -118,7 +119,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
intel_de_write(dev_priv, FBC_TAG(i), 0);
- if (IS_DISPLAY_VER(dev_priv, 4)) {
+ if (DISPLAY_VER(dev_priv) == 4) {
u32 fbc_ctl2;
/* Set it up... */
@@ -302,7 +303,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
- if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) == 9) {
u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
@@ -445,7 +446,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
- if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
+ if (IS_BROADWELL(dev_priv) || (DISPLAY_VER(dev_priv) == 9 &&
+ !IS_BROXTON(dev_priv)))
end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -590,14 +592,14 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
if (stride < 512)
return false;
- if (IS_DISPLAY_VER(dev_priv, 2) || IS_DISPLAY_VER(dev_priv, 3))
+ if (DISPLAY_VER(dev_priv) == 2 || DISPLAY_VER(dev_priv) == 3)
return stride == 4096 || stride == 8192;
- if (IS_DISPLAY_VER(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
+ if (DISPLAY_VER(dev_priv) == 4 && !IS_G4X(dev_priv) && stride < 2048)
return false;
/* Display WA #1105: skl,bxt,kbl,cfl,glk */
- if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
return false;
@@ -617,7 +619,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
@@ -735,11 +737,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
- !plane_state->vma->fence);
+ !plane_state->ggtt_vma->fence);
if (plane_state->flags & PLANE_HAS_FENCE &&
- plane_state->vma->fence)
- cache->fence_id = plane_state->vma->fence->id;
+ plane_state->ggtt_vma->fence)
+ cache->fence_id = plane_state->ggtt_vma->fence->id;
else
cache->fence_id = -1;
@@ -759,7 +761,7 @@ static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) == 9) &&
cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
else
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index ccd00e65a5fe..4af40229f5ec 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -41,6 +41,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include "gem/i915_gem_lmem.h"
+
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fbdev.h"
@@ -137,14 +139,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size);
- /* If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features. */
obj = ERR_PTR(-ENODEV);
- if (size * 2 < dev_priv->stolen_usable_size)
- obj = i915_gem_object_create_stolen(dev_priv, size);
- if (IS_ERR(obj))
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ if (HAS_LMEM(dev_priv)) {
+ obj = i915_gem_object_create_lmem(dev_priv, size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ } else {
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (size * 2 < dev_priv->stolen_usable_size)
+ obj = i915_gem_object_create_stolen(dev_priv, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(dev_priv, size);
+ }
+
if (IS_ERR(obj)) {
drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
return PTR_ERR(obj);
@@ -178,6 +188,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
unsigned long flags = 0;
bool prealloc = false;
void __iomem *vaddr;
+ struct drm_i915_gem_object *obj;
int ret;
if (intel_fb &&
@@ -232,13 +243,27 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = ggtt->gmadr.start;
- info->apertures->ranges[0].size = ggtt->mappable_end;
+ obj = intel_fb_obj(&intel_fb->base);
+ if (i915_gem_object_is_lmem(obj)) {
+ struct intel_memory_region *mem = obj->mm.region;
+
+ info->apertures->ranges[0].base = mem->io_start;
+ info->apertures->ranges[0].size = mem->total;
+
+ /* Use fbdev's framebuffer from lmem for discrete */
+ info->fix.smem_start =
+ (unsigned long)(mem->io_start +
+ i915_gem_object_get_dma_address(obj, 0));
+ info->fix.smem_len = obj->base.size;
+ } else {
+ info->apertures->ranges[0].base = ggtt->gmadr.start;
+ info->apertures->ranges[0].size = ggtt->mappable_end;
- /* Our framebuffer is the entirety of fbdev's system memory */
- info->fix.smem_start =
- (unsigned long)(ggtt->gmadr.start + vma->node.start);
- info->fix.smem_len = vma->node.size;
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ info->fix.smem_len = vma->node.size;
+ }
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index d719cd9c5b73..cef1061fd6cb 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -5,6 +5,7 @@
#include "intel_atomic.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 9605a1064366..eb841960840d 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
@@ -184,15 +185,34 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
+static u32
+icl_pipe_status_underrun_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = PIPE_STATUS_UNDERRUN;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ mask |= PIPE_STATUS_SOFT_UNDERRUN_XELPD |
+ PIPE_STATUS_HARD_UNDERRUN_XELPD |
+ PIPE_STATUS_PORT_UNDERRUN_XELPD;
+
+ return mask;
+}
+
static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask = gen8_de_pipe_underrun_mask(dev_priv);
- if (enable)
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
- else
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ if (enable) {
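+ /*
+ * Clear any stale write-1-to-clear underrun status bits
+ * before unmasking the interrupt, so that a previously
+ * latched underrun is not reported as a new one (see the
+ * PIPE_STAT comment in the irq handler hunk below).
+ */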
+ if (DISPLAY_VER(dev_priv) >= 11)
+ intel_de_write(dev_priv, ICL_PIPESTATUS(pipe),
+ icl_pipe_status_underrun_mask(dev_priv));
+
+ bdw_enable_pipe_irq(dev_priv, pipe, mask);
+ } else {
+ bdw_disable_pipe_irq(dev_priv, pipe, mask);
+ }
}
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -271,7 +291,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
ilk_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_DISPLAY_VER(dev_priv, 7))
+ else if (DISPLAY_VER(dev_priv) == 7)
ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (DISPLAY_VER(dev_priv) >= 8)
bdw_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -372,6 +392,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ u32 underruns = 0;
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
@@ -382,10 +403,35 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
crtc->cpu_fifo_underrun_disabled)
return;
+ /*
+ * Starting with display version 11, the PIPE_STAT register records
+ * whether an underrun has happened, and on XELPD+, it will also record
+ * whether the underrun was soft/hard and whether it was triggered by
+ * the downstream port logic. We should clear these bits (which use
+ * write-1-to-clear logic) too.
+ *
+ * Note that although the IIR gives us the same underrun and soft/hard
+ * information, PIPE_STAT is the only place we can find out whether
+ * the underrun was caused by the downstream port.
+ */
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ underruns = intel_de_read(dev_priv, ICL_PIPESTATUS(pipe)) &
+ icl_pipe_status_underrun_mask(dev_priv);
+ intel_de_write(dev_priv, ICL_PIPESTATUS(pipe), underruns);
+ }
+
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n",
- pipe_name(pipe));
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun: %s%s%s%s\n",
+ pipe_name(pipe),
+ underruns & PIPE_STATUS_SOFT_UNDERRUN_XELPD ? "soft," : "",
+ underruns & PIPE_STATUS_HARD_UNDERRUN_XELPD ? "hard," : "",
+ underruns & PIPE_STATUS_PORT_UNDERRUN_XELPD ? "port," : "",
+ underruns & PIPE_STATUS_UNDERRUN ? "transcoder," : "");
+ else
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
@@ -432,7 +478,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
if (HAS_GMCH(dev_priv))
i9xx_check_fifo_underruns(crtc);
- else if (IS_DISPLAY_VER(dev_priv, 7))
+ else if (DISPLAY_VER(dev_priv) == 7)
ivb_check_fifo_underruns(crtc);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 6fc6965b6133..8e75debcce1a 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -58,6 +58,7 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -87,6 +88,8 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
if (!frontbuffer_bits)
return;
+ trace_intel_frontbuffer_flush(frontbuffer_bits, origin);
+
might_sleep();
intel_edp_drrs_flush(i915, frontbuffer_bits);
intel_psr_flush(i915, frontbuffer_bits, origin);
@@ -173,6 +176,8 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
spin_unlock(&i915->fb_tracking.lock);
}
+ trace_intel_frontbuffer_invalidate(frontbuffer_bits, origin);
+
might_sleep();
intel_psr_invalidate(i915, frontbuffer_bits, origin);
intel_edp_drrs_invalidate(i915, frontbuffer_bits);
@@ -206,7 +211,6 @@ static int frontbuffer_active(struct i915_active *ref)
return 0;
}
-__i915_active_call
static void frontbuffer_retire(struct i915_active *ref)
{
struct intel_frontbuffer *front =
@@ -261,7 +265,8 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
atomic_set(&front->bits, 0);
i915_active_init(&front->write,
frontbuffer_active,
- i915_active_may_sleep(frontbuffer_retire));
+ frontbuffer_retire,
+ I915_ACTIVE_RETIRE_SLEEPS);
spin_lock(&i915->fb_tracking.lock);
if (rcu_access_pointer(obj->frontbuffer)) {
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 8ddc20daef64..fcf47f98ea36 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -34,6 +34,7 @@
#include <drm/drm_hdcp.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
@@ -107,9 +108,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
return &gmbus_pins_icp[pin];
else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
return &gmbus_pins_bxt[pin];
- else if (IS_GEN9_BC(dev_priv))
+ else if (DISPLAY_VER(dev_priv) == 9)
return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin];
@@ -128,9 +129,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
size = ARRAY_SIZE(gmbus_pins_icp);
else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
- else if (IS_GEN9_BC(dev_priv))
+ else if (DISPLAY_VER(dev_priv) == 9)
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);
@@ -600,7 +601,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_gmbus_clock_gating(dev_priv, false);
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
pch_gmbus_clock_gating(dev_priv, false);
@@ -713,7 +714,7 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_gmbus_clock_gating(dev_priv, true);
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
pch_gmbus_clock_gating(dev_priv, true);
@@ -845,9 +846,6 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (!HAS_DISPLAY(dev_priv))
- return 0;
-
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index d8570e14fe60..ebc2e32aec0b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -18,6 +18,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display_power.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
@@ -286,11 +287,12 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
/*
* Initiate loading the HDCP key from fuses.
*
- * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
- * platforms except BXT and GLK, differ in the key load trigger process
- * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
+ * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only display
+ * version 9 platforms (minus BXT) differ in the key load trigger
+ * process from other platforms. These platforms use the GT Driver
+ * Mailbox interface.
*/
- if (IS_GEN9_BC(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
ret = sandybridge_pcode_write(dev_priv,
SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index d69f0a6dc26d..7e51c98c475e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -43,6 +43,7 @@
#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
@@ -531,6 +532,11 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
0);
+ /* Wa_14013475917 */
+ if (DISPLAY_VER(dev_priv) == 13 && crtc_state->has_psr &&
+ type == DP_SDP_VSC)
+ return;
+
val |= hsw_infoframe_enable(type);
intel_de_write(dev_priv, ctl_reg, val);
intel_de_posting_read(dev_priv, ctl_reg);
@@ -542,11 +548,9 @@ void hsw_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, HSW_TVIDEO_DIP_CTL(cpu_transcoder));
-
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv,
hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
@@ -1251,8 +1255,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");
- drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
- adapter, enable);
+ drm_dp_dual_mode_set_tmds_output(&dev_priv->drm, hdmi->dp_dual_mode.type, adapter, enable);
}
static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port,
@@ -1841,7 +1844,8 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_RANGE;
/* BXT/GLK DPLL can't generate 223-240 MHz */
- if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
@@ -1861,6 +1865,73 @@ static int intel_hdmi_port_clock(int clock, int bpc)
return clock * bpc / 8;
}
+static bool intel_hdmi_bpc_possible(struct drm_connector *connector,
+ int bpc, bool has_hdmi_sink, bool ycbcr420_output)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_hdmi_info *hdmi = &info->hdmi;
+
+ switch (bpc) {
+ case 12:
+ if (HAS_GMCH(i915))
+ return false;
+
+ if (!has_hdmi_sink)
+ return false;
+
+ if (ycbcr420_output)
+ return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36;
+ else
+ return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36;
+ case 10:
+ if (DISPLAY_VER(i915) < 11)
+ return false;
+
+ if (!has_hdmi_sink)
+ return false;
+
+ if (ycbcr420_output)
+ return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30;
+ else
+ return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30;
+ case 8:
+ return true;
+ default:
+ MISSING_CASE(bpc);
+ return false;
+ }
+}
+
+static enum drm_mode_status
+intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
+ bool has_hdmi_sink, bool ycbcr420_output)
+{
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ enum drm_mode_status status;
+
+ if (ycbcr420_output)
+ clock /= 2;
+
+ /* check if we can do 8bpc */
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8),
+ true, has_hdmi_sink);
+
+ /* if we can't do 8bpc we may still be able to do 12bpc */
+ if (status != MODE_OK &&
+ intel_hdmi_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output))
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
+ true, has_hdmi_sink);
+
+ /* if we can't do 8,12bpc we may still be able to do 10bpc */
+ if (status != MODE_OK &&
+ intel_hdmi_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output))
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
+ true, has_hdmi_sink);
+
+ return status;
+}
+
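
The helper above tries bpc values in the fixed order 8, 12, 10 because the TMDS clock scales as clock * bpc / 8, and a clock that falls into a hole the DPLL cannot generate at 8 bpc may land in a valid range at a deep-color rate. A simplified sketch of that fallback (it omits the bpc capability checks), with a hypothetical validator standing in for hdmi_port_clock_valid():

#include <stdio.h>

enum mode_status { MODE_OK, MODE_CLOCK_RANGE };

/* Hypothetical validator: pretend the PLL cannot generate
 * 223333-240000 kHz, as on BXT/GLK in the hunk above. */
static enum mode_status port_clock_valid(int clock)
{
	if (clock > 223333 && clock < 240000)
		return MODE_CLOCK_RANGE;
	return MODE_OK;
}

static int port_clock(int clock, int bpc)
{
	return clock * bpc / 8; /* same scaling as intel_hdmi_port_clock() */
}

/* Try 8 bpc first, then 12, then 10, mirroring the fallback order
 * in intel_hdmi_mode_clock_valid() above. */
static enum mode_status mode_clock_valid(int clock)
{
	enum mode_status status = port_clock_valid(port_clock(clock, 8));

	if (status != MODE_OK)
		status = port_clock_valid(port_clock(clock, 12));
	if (status != MODE_OK)
		status = port_clock_valid(port_clock(clock, 10));
	return status;
}

int main(void)
{
	/* 230000 kHz fails at 8 bpc (inside the PLL hole), but the same
	 * mode at 12 bpc is 345000 kHz, which is fine, so the mode is
	 * still accepted. */
	printf("status: %s\n",
	       mode_clock_valid(230000) == MODE_OK ? "MODE_OK" : "MODE_CLOCK_RANGE");
	return 0;
}
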
static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -1872,6 +1943,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
+ bool ycbcr_420_only;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -1888,26 +1960,19 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock *= 2;
}
- if (drm_mode_is_420_only(&connector->display_info, mode))
- clock /= 2;
-
- /* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8),
- true, has_hdmi_sink);
+ ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
- if (has_hdmi_sink) {
- /* if we can't do 8bpc we may still be able to do 12bpc */
- if (status != MODE_OK && !HAS_GMCH(dev_priv))
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
- true, has_hdmi_sink);
+ status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only);
+ if (status != MODE_OK) {
+ if (ycbcr_420_only ||
+ !connector->ycbcr_420_allowed ||
+ !drm_mode_is_420_also(&connector->display_info, mode))
+ return status;
- /* if we can't do 8,12bpc we may still be able to do 10bpc */
- if (status != MODE_OK && DISPLAY_VER(dev_priv) >= 11)
- status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
- true, has_hdmi_sink);
+ status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, true);
+ if (status != MODE_OK)
+ return status;
}
- if (status != MODE_OK)
- return status;
return intel_mode_valid_max_plane_size(dev_priv, mode, false);
}
@@ -1923,32 +1988,12 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (crtc_state->pipe_bpp < bpc * 3)
return false;
- if (!has_hdmi_sink)
- return false;
-
for_each_new_connector_in_state(state, connector, connector_state, i) {
- const struct drm_display_info *info = &connector->display_info;
-
if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
- if (ycbcr420_output) {
- const struct drm_hdmi_info *hdmi = &info->hdmi;
-
- if (bpc == 12 && !(hdmi->y420_dc_modes &
- DRM_EDID_YCBCR420_DC_36))
- return false;
- else if (bpc == 10 && !(hdmi->y420_dc_modes &
- DRM_EDID_YCBCR420_DC_30))
- return false;
- } else {
- if (bpc == 12 && !(info->edid_hdmi_dc_modes &
- DRM_EDID_HDMI_DC_36))
- return false;
- else if (bpc == 10 && !(info->edid_hdmi_dc_modes &
- DRM_EDID_HDMI_DC_30))
- return false;
- }
+ if (!intel_hdmi_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ return false;
}
return true;
@@ -1962,12 +2007,6 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (HAS_GMCH(dev_priv))
- return false;
-
- if (bpc == 10 && DISPLAY_VER(dev_priv) < 11)
- return false;
-
/*
* HDMI deep color affects the clocks, so it's only possible
* when not cloning with other encoder types.
@@ -1977,7 +2016,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
/* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && IS_DISPLAY_VER(dev_priv, 11) &&
+ bpc == 10 && DISPLAY_VER(dev_priv) == 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -1988,29 +2027,6 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
INTEL_OUTPUT_FORMAT_YCBCR420);
}
-static int
-intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct drm_connector *connector = conn_state->connector;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
-
- if (!drm_mode_is_420_only(&connector->display_info, adjusted_mode))
- return 0;
-
- if (!connector->ycbcr_420_allowed) {
- drm_err(&i915->drm,
- "Platform doesn't support YCBCR420 output\n");
- return -EINVAL;
- }
-
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
-
- return intel_pch_panel_fitting(crtc_state, conn_state);
-}
-
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int clock)
@@ -2117,6 +2133,39 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
+static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int ret;
+ bool ycbcr_420_only;
+
+ ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, adjusted_mode);
+ if (connector->ycbcr_420_allowed && ycbcr_420_only) {
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else {
+ if (!connector->ycbcr_420_allowed && ycbcr_420_only)
+ drm_dbg_kms(&i915->drm,
+ "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ }
+
+ ret = intel_hdmi_compute_clock(encoder, crtc_state);
+ if (ret) {
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ connector->ycbcr_420_allowed &&
+ drm_mode_is_420_also(&connector->display_info, adjusted_mode)) {
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+ ret = intel_hdmi_compute_clock(encoder, crtc_state);
+ }
+ }
+
+ return ret;
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2141,23 +2190,25 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
- ret = intel_hdmi_ycbcr420_config(pipe_config, conn_state);
- if (ret)
- return ret;
-
- pipe_config->limited_color_range =
- intel_hdmi_limited_color_range(pipe_config, conn_state);
-
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
pipe_config->has_audio =
intel_hdmi_has_audio(encoder, pipe_config, conn_state);
- ret = intel_hdmi_compute_clock(encoder, pipe_config);
+ ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state);
if (ret)
return ret;
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
+ }
+
+ pipe_config->limited_color_range =
+ intel_hdmi_limited_color_range(pipe_config, conn_state);
+
if (conn_state->picture_aspect_ratio)
adjusted_mode->picture_aspect_ratio =
conn_state->picture_aspect_ratio;
@@ -2223,7 +2274,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
- enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
+ enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(&dev_priv->drm, adapter);
/*
* Type 1 DVI adaptors are not required to implement any
@@ -2256,7 +2307,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
hdmi->dp_dual_mode.type = type;
hdmi->dp_dual_mode.max_tmds_clock =
- drm_dp_dual_mode_max_tmds_clock(type, adapter);
+ drm_dp_dual_mode_max_tmds_clock(&dev_priv->drm, type, adapter);
drm_dbg_kms(&dev_priv->drm,
"DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
@@ -2460,8 +2511,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
drm_connector_attach_content_type_property(connector);
if (DISPLAY_VER(dev_priv) >= 10)
- drm_object_attach_property(&connector->base,
- connector->dev->mode_config.hdr_output_metadata_property, 0);
+ drm_connector_attach_hdr_output_metadata_property(connector);
if (!HAS_GMCH(dev_priv))
drm_connector_attach_max_bpc_property(connector, 8, 12);
@@ -2708,13 +2758,13 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}
- if (HAS_PCH_ADP(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv))
ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
else if (IS_ROCKETLAKE(dev_priv))
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
- else if (IS_GEN9_BC(dev_priv) && HAS_PCH_TGP(dev_priv))
+ else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
@@ -2722,7 +2772,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
else if (IS_CHERRYVIEW(dev_priv))
ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
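
The intel_hdmi_compute_output_format() logic added earlier in this file prefers RGB unless the mode is 4:2:0-only, then retries as YCbCr 4:2:0 (which halves the pixel rate) when clock computation fails and the mode also supports 4:2:0. A compact sketch of that fallback under those assumptions; the types and the 600 MHz limit are hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum output_format { FORMAT_RGB, FORMAT_YCBCR420 };

struct fake_mode {
	int clock_khz;
	bool is_420_only;  /* mode only works as 4:2:0 */
	bool is_420_also;  /* mode works as RGB or 4:2:0 */
};

/* 4:2:0 halves the pixel rate on the link. */
static int clock_for(const struct fake_mode *m, enum output_format f)
{
	return f == FORMAT_YCBCR420 ? m->clock_khz / 2 : m->clock_khz;
}

/* Hypothetical clock limit standing in for the DPLL/TMDS checks. */
static int compute_clock(const struct fake_mode *m, enum output_format f,
			 int max_clock)
{
	return clock_for(m, f) <= max_clock ? 0 : -1;
}

static int compute_output_format(const struct fake_mode *m, bool y420_allowed,
				 int max_clock, enum output_format *out)
{
	int ret;

	*out = (y420_allowed && m->is_420_only) ? FORMAT_YCBCR420 : FORMAT_RGB;
	ret = compute_clock(m, *out, max_clock);

	/* RGB failed: retry as 4:2:0 when the mode and sink allow it. */
	if (ret && *out != FORMAT_YCBCR420 && y420_allowed && m->is_420_also) {
		*out = FORMAT_YCBCR420;
		ret = compute_clock(m, *out, max_clock);
	}
	return ret;
}

int main(void)
{
	struct fake_mode m = { .clock_khz = 1188000, .is_420_also = true };
	enum output_format f;

	/* 1188000 kHz exceeds a 600000 kHz RGB limit but fits at 4:2:0. */
	if (compute_output_format(&m, true, 600000, &f) == 0)
		printf("using %s\n", f == FORMAT_YCBCR420 ? "YCbCr 4:2:0" : "RGB");
	return 0;
}
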
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index f46a1b7190b8..47c85ac97c87 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -595,6 +595,9 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
{
int i;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
for_each_hpd_pin(i) {
dev_priv->hotplug.stats[i].count = 0;
dev_priv->hotplug.stats[i].state = HPD_ENABLED;
@@ -670,6 +673,9 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*/
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
/*
@@ -702,6 +708,9 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
*/
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
schedule_work(&dev_priv->hotplug.poll_init_work);
}
@@ -718,6 +727,9 @@ void intel_hpd_init_work(struct drm_i915_private *dev_priv)
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hotplug.long_port_mask = 0;
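
All four hotplug entry points gain the same early return, so they become no-ops on devices without display hardware and callers never need to special-case them. The guard pattern, sketched with a hypothetical capability flag:

#include <stdbool.h>
#include <stdio.h>

struct fake_device { bool has_display; };

/* Each entry point bails out first thing when there is no display. */
static void hpd_init(struct fake_device *dev)
{
	if (!dev->has_display)
		return;
	printf("initializing hotplug state\n");
}

int main(void)
{
	struct fake_device headless = { .has_display = false };

	hpd_init(&headless); /* silently does nothing */
	return 0;
}
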
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index e4ff533e3a69..05d2d750fa53 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,6 +27,7 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_edid.h>
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_lspcon.h"
@@ -139,10 +140,11 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
enum drm_lspcon_mode current_mode;
- struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
- if (drm_lspcon_get_mode(adapter, &current_mode)) {
+ if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode)) {
DRM_DEBUG_KMS("Error reading LSPCON mode\n");
return DRM_LSPCON_MODE_INVALID;
}
@@ -175,11 +177,12 @@ out:
static int lspcon_change_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode mode)
{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
int err;
enum drm_lspcon_mode current_mode;
- struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
- err = drm_lspcon_get_mode(adapter, &current_mode);
+ err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode);
if (err) {
DRM_ERROR("Error reading LSPCON mode\n");
return err;
@@ -190,7 +193,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
return 0;
}
- err = drm_lspcon_set_mode(adapter, mode);
+ err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, adapter, mode);
if (err < 0) {
DRM_ERROR("LSPCON mode change failed\n");
return err;
@@ -221,7 +224,8 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
{
int retry;
enum drm_dp_dual_mode_type adaptor_type;
- struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
enum drm_lspcon_mode expected_mode;
expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
@@ -232,7 +236,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
if (retry)
usleep_range(500, 1000);
- adaptor_type = drm_dp_dual_mode_detect(adapter);
+ adaptor_type = drm_dp_dual_mode_detect(intel_dp->aux.drm_dev, adapter);
if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON)
break;
}
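
lspcon_probe() retries adaptor detection several times with a short delay, since an LSPCON can still be executing its boot sequence when first probed. The retry loop, sketched with a hypothetical detect() that only succeeds on the third attempt:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

enum adaptor_type { ADAPTOR_NONE, ADAPTOR_LSPCON };

/* Hypothetical detect: succeeds only on the third attempt, standing in
 * for an LSPCON that is still booting. */
static enum adaptor_type detect(int attempt)
{
	return attempt >= 2 ? ADAPTOR_LSPCON : ADAPTOR_NONE;
}

static bool probe_lspcon(void)
{
	enum adaptor_type type = ADAPTOR_NONE;
	int retry;

	for (retry = 0; retry < 6; retry++) {
		if (retry)
			usleep(500); /* the driver waits 500-1000 us */
		type = detect(retry);
		if (type == ADAPTOR_LSPCON)
			break;
	}
	return type == ADAPTOR_LSPCON;
}

int main(void)
{
	printf("LSPCON found: %d\n", probe_lspcon());
	return 0;
}
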
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index f31a368f34c5..7f40e9f60bc2 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -41,6 +41,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
@@ -280,7 +281,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg.
*/
- if (IS_DISPLAY_VER(dev_priv, 4)) {
+ if (DISPLAY_VER(dev_priv) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index bbaf05515e88..7e3f5c6ca484 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -34,6 +34,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
@@ -383,8 +384,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
i830_overlay_clock_gating(dev_priv, true);
}
-__i915_active_call static void
-intel_overlay_last_flip_retire(struct i915_active *active)
+static void intel_overlay_last_flip_retire(struct i915_active *active)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
@@ -550,7 +550,7 @@ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 widt
{
u32 sw;
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
sw = ALIGN((offset & 31) + width, 32);
else
sw = ALIGN((offset & 63) + width, 64);
@@ -820,7 +820,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig |= OCONF_CC_OUT_8BIT;
if (crtc_state->gamma_enable)
oconfig |= OCONF_GAMMA2_ENABLE;
- if (IS_DISPLAY_VER(dev_priv, 4))
+ if (DISPLAY_VER(dev_priv) == 4)
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -1054,7 +1054,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_DISPLAY_VER(dev_priv, 4) && rec->stride_Y < 512)
+ if (DISPLAY_VER(dev_priv) == 4 && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1281,7 +1281,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (!IS_DISPLAY_VER(dev_priv, 2)) {
+ if (DISPLAY_VER(dev_priv) != 2) {
attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
@@ -1305,7 +1305,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
goto out_unlock;
if (overlay->active) {
@@ -1401,7 +1401,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->saturation = 146;
i915_active_init(&overlay->last_flip,
- NULL, intel_overlay_last_flip_retire);
+ NULL, intel_overlay_last_flip_retire, 0);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
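
The overlay hunk above adapts to an i915_active_init() signature that now takes an explicit flags argument (0 here) instead of tagging the retire callback with __i915_active_call. The general shape, registering a retire callback plus flags on a tracked object, sketched generically; the tracker struct is a made-up stand-in, not the real i915_active:

#include <stdio.h>

/* Generic stand-in for the i915_active tracker: a callback invoked
 * when the last outstanding request retires, plus a flags word. */
struct tracker {
	void (*retire)(struct tracker *t);
	unsigned int flags;
};

static void tracker_init(struct tracker *t,
			 void (*retire)(struct tracker *t),
			 unsigned int flags)
{
	t->retire = retire;
	t->flags = flags;
}

static void last_flip_retire(struct tracker *t)
{
	printf("overlay flip retired (flags=%u), safe to reuse registers\n",
	       t->flags);
}

int main(void)
{
	struct tracker last_flip;

	/* Matches the new call shape: init(..., retire_cb, 0). */
	tracker_init(&last_flip, last_flip_retire, 0);
	last_flip.retire(&last_flip);
	return 0;
}
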
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 10022d1575e1..7d7a60b4d2de 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -35,6 +35,7 @@
#include <linux/pwm.h>
#include "intel_connector.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
@@ -667,7 +668,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
}
- if (IS_DISPLAY_VER(dev_priv, 4)) {
+ if (DISPLAY_VER(dev_priv) == 4) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
@@ -1040,7 +1041,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
@@ -1372,6 +1373,9 @@ int intel_backlight_device_register(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
+ struct backlight_device *bd;
+ const char *name;
+ int ret = 0;
if (WARN_ON(panel->backlight.device))
return -ENODEV;
@@ -1398,28 +1402,49 @@ int intel_backlight_device_register(struct intel_connector *connector)
else
props.power = FB_BLANK_POWERDOWN;
+ name = kstrdup("intel_backlight", GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
+
/*
- * Note: using the same name independent of the connector prevents
- * registration of multiple backlight devices in the driver.
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
*/
- panel->backlight.device =
- backlight_device_register("intel_backlight",
- connector->base.kdev,
- connector,
- &intel_backlight_device_ops, &props);
-
- if (IS_ERR(panel->backlight.device)) {
- drm_err(&i915->drm, "Failed to register backlight: %ld\n",
- PTR_ERR(panel->backlight.device));
- panel->backlight.device = NULL;
- return -ENODEV;
+ if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
+ i915->drm.primary->index, connector->base.name);
+ if (!name)
+ return -ENOMEM;
+
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
}
+ if (IS_ERR(bd)) {
+ drm_err(&i915->drm,
+ "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n",
+ connector->base.base.id, connector->base.name, name, PTR_ERR(bd));
+ ret = PTR_ERR(bd);
+ goto out;
+ }
+
+ panel->backlight.device = bd;
+
drm_dbg_kms(&i915->drm,
- "Connector %s backlight sysfs interface registered\n",
- connector->base.name);
+ "[CONNECTOR:%d:%s] backlight device %s registered\n",
+ connector->base.base.id, connector->base.name, name);
- return 0;
+out:
+ kfree(name);
+
+ return ret;
}
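
The rework above registers the legacy "intel_backlight" name first for backward compatibility, and only falls back to a unique card%d-connector name when that registration fails with -EEXIST. The try-default-then-unique pattern, sketched with a hypothetical in-memory registry in place of the backlight class:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_DEVS 8

static char registered[MAX_DEVS][64];
static int nr_registered;

/* Hypothetical registry: rejects duplicate names with -EEXIST. */
static int register_name(const char *name)
{
	int i;

	for (i = 0; i < nr_registered; i++)
		if (!strcmp(registered[i], name))
			return -EEXIST;
	snprintf(registered[nr_registered++], 64, "%s", name);
	return 0;
}

static int register_backlight(int card, const char *connector)
{
	char name[64];
	int ret;

	/* Default name first, for backward compatibility. */
	ret = register_name("intel_backlight");
	if (ret == -EEXIST) {
		/* Fall back to a unique per-connector name. */
		snprintf(name, sizeof(name), "card%d-%s-backlight",
			 card, connector);
		ret = register_name(name);
	}
	return ret;
}

int main(void)
{
	printf("first:  %d\n", register_backlight(0, "eDP-1")); /* 0, default name */
	printf("second: %d\n", register_backlight(0, "DP-2"));  /* 0, unique name */
	return 0;
}
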
void intel_backlight_device_unregister(struct intel_connector *connector)
@@ -1728,7 +1753,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- if (IS_DISPLAY_VER(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev_priv))
@@ -2161,7 +2186,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
@@ -2178,7 +2203,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
} else {
panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
- } else if (IS_DISPLAY_VER(dev_priv, 4)) {
+ } else if (DISPLAY_VER(dev_priv) == 4) {
panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 7c8e0d76207f..8ac263f471be 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -30,6 +30,7 @@
#include <linux/seq_file.h>
#include "intel_atomic.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_pipe_crc.h"
@@ -409,7 +410,7 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val)
{
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
return i8xx_pipe_crc_ctl_reg(source, val);
else if (DISPLAY_VER(dev_priv) < 5)
return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
@@ -539,7 +540,7 @@ static int
intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
const enum intel_pipe_crc_source source)
{
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
return i8xx_crc_source_valid(dev_priv, source);
else if (DISPLAY_VER(dev_priv) < 5)
return i9xx_crc_source_valid(dev_priv, source);
@@ -580,13 +581,14 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
return -EINVAL;
}
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
+int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
+ enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
@@ -597,7 +599,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
return -EINVAL;
}
- power_domain = POWER_DOMAIN_PIPE(crtc->index);
+ power_domain = POWER_DOMAIN_PIPE(pipe);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref) {
drm_dbg_kms(&dev_priv->drm,
@@ -607,64 +609,64 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
if (enable)
- intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
+ intel_crtc_crc_setup_workarounds(crtc, true);
- ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
+ ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
if (ret != 0)
goto out;
pipe_crc->source = source;
- intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe));
if (!source) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
+ vlv_undo_pipe_scramble_reset(dev_priv, pipe);
}
pipe_crc->skipped = 0;
out:
if (!enable)
- intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
+ intel_crtc_crc_setup_workarounds(crtc, false);
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
-void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
+ enum pipe pipe = crtc->pipe;
u32 val = 0;
- if (!crtc->crc.opened)
+ if (!crtc->base.crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
+ if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe));
}
-void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
+ enum pipe pipe = crtc->pipe;
/* Swallow crc's until we stop generating them. */
spin_lock_irq(&pipe_crc->lock);
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), 0);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), 0);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe));
intel_synchronize_irq(dev_priv);
}
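
intel_crtc_set_crc_source() brackets its register writes with intel_display_power_get_if_enabled()/intel_display_power_put(), bailing out when the pipe's power domain is already off. A sketch of that acquire-if-enabled pattern with a hypothetical refcounted domain:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct power_domain {
	bool enabled;
	int refcount;
};

/* Take a reference only if the domain is currently powered;
 * returns a non-zero cookie (wakeref) on success. */
static int power_get_if_enabled(struct power_domain *d)
{
	if (!d->enabled)
		return 0;
	d->refcount++;
	return 1;
}

static void power_put(struct power_domain *d, int wakeref)
{
	if (wakeref)
		d->refcount--;
}

static int set_crc_source(struct power_domain *pipe_domain)
{
	int wakeref = power_get_if_enabled(pipe_domain);

	if (!wakeref)
		return -EIO; /* pipe is powered down, nothing to program */

	/* ... program CRC control registers here ... */

	power_put(pipe_domain, wakeref);
	return 0;
}

int main(void)
{
	struct power_domain off = { .enabled = false };
	struct power_domain on = { .enabled = true };

	printf("off pipe: %d\n", set_crc_source(&off)); /* fails cleanly */
	printf("on pipe:  %d\n", set_crc_source(&on));  /* 0 */
	return 0;
}
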
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index c55da130773b..a36ec4a818ff 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -5,6 +5,7 @@
#include "g4x_dp.h"
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpll.h"
@@ -313,10 +314,10 @@ void intel_pps_reset_all(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- if (drm_WARN_ON(&dev_priv->drm,
- !(IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_GEN9_LP(dev_priv))))
+ if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
+ return;
+
+ if (!HAS_DISPLAY(dev_priv))
return;
/*
@@ -338,7 +339,7 @@ void intel_pps_reset_all(struct drm_i915_private *dev_priv)
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
- if (IS_GEN9_LP(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 9)
intel_dp->pps.pps_reset = true;
else
intel_dp->pps.pps_pipe = INVALID_PIPE;
@@ -361,7 +362,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
memset(regs, 0, sizeof(*regs));
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pps_idx = bxt_power_sequencer_idx(intel_dp);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pps_idx = vlv_power_sequencer_pipe(intel_dp);
@@ -372,7 +373,8 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
regs->pp_off = PP_OFF_DELAYS(pps_idx);
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
- if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
regs->pp_div = PP_DIVISOR(pps_idx);
@@ -1378,7 +1380,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
int pps_num;
int pps_idx;
- if (HAS_DDI(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
return;
/*
* This w/a is needed at least on CPT/PPT, but to be sure apply it
@@ -1399,7 +1401,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
void intel_pps_setup(struct drm_i915_private *i915)
{
- if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
+ if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
i915->pps_mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
i915->pps_mmio_base = VLV_PPS_BASE;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 8ada4f829cab..77865cf6641f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
@@ -524,7 +525,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
val |= EDP_Y_COORDINATE_ENABLE;
val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
@@ -637,7 +638,7 @@ unlock:
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
- if (!intel_dp->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_exitline)
return;
cancel_delayed_work(&intel_dp->psr.dc3co_work);
@@ -645,28 +646,47 @@ static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
tgl_psr2_disable_dc3co(intel_dp);
}
+static bool
+dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum port port = dig_port->base.port;
+
+ if (IS_ALDERLAKE_P(dev_priv))
+ return pipe <= PIPE_B && port <= PORT_B;
+ else
+ return pipe == PIPE_A && port == PORT_A;
+}
+
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 exit_scanlines;
/*
+	 * FIXME: B.Specs:49196 changed the DC3CO activate/deactivate
+	 * sequence; keep DC3CO disabled until the new sequence is
+	 * implemented here.
+ */
+ return;
+
+ /*
* DMC's DC3CO exit mechanism has an issue with Selective Fetch
* TODO: when the issue is addressed, this restriction should be removed.
*/
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
- /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
- if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
- dig_port->base.port != PORT_A)
+ if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
return;
/*
@@ -712,6 +732,13 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
}
}
+ /* Wa_14010254185 Wa_14010103792 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
+ return false;
+ }
+
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -732,6 +759,21 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ /* Wa_16011181250 */
+ if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
+ return false;
+ }
+
+ /*
+ * We are missing the implementation of some workarounds needed to enable
+ * PSR2 on Alderlake-P; until those are ready, PSR2 should be kept disabled.
+ */
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n");
+ return false;
+ }
+
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not supported in transcoder %s\n",
@@ -769,7 +811,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
psr_max_h = 4096;
psr_max_v = 2304;
max_bpp = 24;
- } else if (IS_DISPLAY_VER(dev_priv, 9)) {
+ } else if (DISPLAY_VER(dev_priv) == 9) {
psr_max_h = 3640;
psr_max_v = 2304;
max_bpp = 24;
@@ -804,6 +846,13 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
}
+ /* Wa_2209313811 */
+ if (!crtc_state->enable_psr2_sel_fetch &&
+ IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported on this display stepping\n");
+ return false;
+ }
+
if (!crtc_state->enable_psr2_sel_fetch &&
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
drm_dbg_kms(&dev_priv->drm,
@@ -873,6 +922,51 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}
+void intel_psr_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp;
+ u32 val;
+
+ if (!dig_port)
+ return;
+
+ intel_dp = &dig_port->dp;
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled)
+ goto unlock;
+
+ /*
+ * Not possible to read back the EDP_PSR/PSR2_CTL registers reliably, as
+ * they are enabled/disabled dynamically by frontbuffer tracking and others.
+ */
+ pipe_config->has_psr = true;
+ pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
+ pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+
+ if (!intel_dp->psr.psr2_enabled)
+ goto unlock;
+
+ if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
+ if (val & PSR2_MAN_TRK_CTL_ENABLE)
+ pipe_config->enable_psr2_sel_fetch = true;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
+ val &= EXITLINE_MASK;
+ pipe_config->dc3co_exitline = val;
+ }
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+}
+
static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -896,11 +990,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
intel_dp->psr.active = true;
}
-static void intel_psr_enable_source(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
@@ -909,7 +1002,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_psr_setup_aux(intel_dp);
- if (intel_dp->psr.psr2_enabled && IS_DISPLAY_VER(dev_priv, 9)) {
+ if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = intel_de_read(dev_priv, reg);
@@ -937,7 +1030,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
psr_irq_control(intel_dp);
- if (crtc_state->dc3co_exitline) {
+ if (intel_dp->psr.dc3co_exitline) {
u32 val;
/*
@@ -946,7 +1039,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
val &= ~EXITLINE_MASK;
- val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
+ val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
val |= EXITLINE_ENABLE;
intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
}
@@ -957,27 +1050,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
IGNORE_PSR2_HW_TRACKING : 0);
}
-static void intel_psr_enable_locked(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
u32 val;
- drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
-
- intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
- intel_dp->psr.busy_frontbuffer_bits = 0;
- intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
- intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
- /* DC5/DC6 requires at least 6 idle frames */
- val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
- intel_dp->psr.dc3co_exit_delay = val;
- intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
-
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
* will still keep the error set even after the reset done in the
@@ -998,17 +1075,45 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
"PSR interruption error set, not enabling PSR\n");
- return;
+ return false;
}
+ return true;
+}
+
+static void intel_psr_enable_locked(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
+
+ intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.busy_frontbuffer_bits = 0;
+ intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
+ /* DC5/DC6 requires at least 6 idle frames */
+ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
+ intel_dp->psr.dc3co_exit_delay = val;
+ intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
+ intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+
+ if (!psr_interrupt_error_check(intel_dp))
+ return;
+
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
&intel_dp->psr.vsc);
intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
intel_psr_enable_sink(intel_dp);
- intel_psr_enable_source(intel_dp, crtc_state);
+ intel_psr_enable_source(intel_dp);
intel_dp->psr.enabled = true;
+ intel_dp->psr.paused = false;
intel_psr_activate(intel_dp);
}
@@ -1078,22 +1183,12 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
intel_dp->psr.active = false;
}
-static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t psr_status;
u32 psr_status_mask;
- lockdep_assert_held(&intel_dp->psr.lock);
-
- if (!intel_dp->psr.enabled)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
-
- intel_psr_exit(intel_dp);
-
if (intel_dp->psr.psr2_enabled) {
psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
@@ -1106,6 +1201,22 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (intel_de_wait_for_clear(dev_priv, psr_status,
psr_status_mask, 2000))
drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
+}
+
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
+
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
/* WA 1408330847 */
if (intel_dp->psr.psr2_sel_fetch_enabled &&
@@ -1150,25 +1261,66 @@ void intel_psr_disable(struct intel_dp *intel_dp,
cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
+/**
+ * intel_psr_pause - Pause PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called after enabling PSR.
+ */
+void intel_psr_pause(struct intel_dp *intel_dp)
+{
+ struct intel_psr *psr = &intel_dp->psr;
+
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (!psr->enabled) {
+ mutex_unlock(&psr->lock);
+ return;
+ }
+
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+ psr->paused = true;
+
+ mutex_unlock(&psr->lock);
+
+ cancel_work_sync(&psr->work);
+ cancel_delayed_work_sync(&psr->dc3co_work);
+}
+
+/**
+ * intel_psr_resume - Resume PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called after pausing PSR.
+ */
+void intel_psr_resume(struct intel_dp *intel_dp)
+{
+ struct intel_psr *psr = &intel_dp->psr;
+
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (!psr->paused)
+ goto unlock;
+
+ psr->paused = false;
+ intel_psr_activate(intel_dp);
+
+unlock:
+ mutex_unlock(&psr->lock);
+}
+
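
The new pause/resume pair deactivates PSR under psr.lock, records the state in psr.paused, and the frontbuffer flush path skips re-activation while the flag is set. A sketch of that lock/flag interplay using pthreads; the state struct is a simplified stand-in for struct intel_psr:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct psr_state {
	pthread_mutex_t lock;
	bool enabled;
	bool active;
	bool paused;
};

static void psr_pause(struct psr_state *psr)
{
	pthread_mutex_lock(&psr->lock);
	if (psr->enabled) {
		psr->active = false; /* exit + wait for idle in the driver */
		psr->paused = true;
	}
	pthread_mutex_unlock(&psr->lock);
}

/* Frontbuffer flush must not re-activate PSR while paused. */
static void psr_flush(struct psr_state *psr)
{
	pthread_mutex_lock(&psr->lock);
	if (psr->enabled && !psr->paused)
		psr->active = true;
	pthread_mutex_unlock(&psr->lock);
}

static void psr_resume(struct psr_state *psr)
{
	pthread_mutex_lock(&psr->lock);
	if (psr->paused) {
		psr->paused = false;
		psr->active = true;
	}
	pthread_mutex_unlock(&psr->lock);
}

int main(void)
{
	struct psr_state psr = {
		.lock = PTHREAD_MUTEX_INITIALIZER, .enabled = true, .active = true,
	};

	psr_pause(&psr);
	psr_flush(&psr); /* no-op: still paused */
	printf("active while paused: %d\n", psr.active); /* 0 */
	psr_resume(&psr);
	printf("active after resume: %d\n", psr.active); /* 1 */
	return 0;
}
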
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (IS_TIGERLAKE(dev_priv))
- /*
- * Writes to CURSURFLIVE in TGL are causing IOMMU errors and
- * visual glitches that are often reproduced when executing
- * CPU intensive workloads while a eDP 4K panel is attached.
- *
- * Manually exiting PSR causes the frontbuffer to be updated
- * without glitches and the IOMMU errors are also gone but
- * this comes at the cost of less time with PSR active.
- *
- * So using this workaround until this issue is root caused
- * and a better fix is found.
- */
- intel_psr_exit(intel_dp);
- else if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
/*
* Display WA #0884: skl+
* This documented WA for bxt can be safely applied
@@ -1759,7 +1911,7 @@ tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
{
mutex_lock(&intel_dp->psr.lock);
- if (!intel_dp->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_exitline)
goto unlock;
if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
@@ -1818,6 +1970,16 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
+ /*
+	 * If PSR is paused by an explicit intel_psr_pause() call, we have to
+	 * ensure that PSR is not activated until intel_psr_resume() is called.
+ */
+ if (intel_dp->psr.paused) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
/* By definition flush = invalidate + flush */
if (pipe_frontbuffer_bits)
psr_force_hw_tracking_exit(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 0491a49ffd50..641521b101c8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -17,6 +17,7 @@ struct intel_crtc;
struct intel_atomic_state;
struct intel_plane_state;
struct intel_plane;
+struct intel_encoder;
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
@@ -37,6 +38,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct intel_dp *intel_dp);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
+void intel_psr_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
@@ -48,5 +51,7 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane);
+void intel_psr_pause(struct intel_dp *intel_dp);
+void intel_psr_resume(struct intel_dp *intel_dp);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
new file mode 100644
index 000000000000..c626a24fe98f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_dsc.h>
+
+#include "i915_utils.h"
+#include "intel_qp_tables.h"
+
+/* from BPP 6 to 24 in steps of 0.5 */
+#define RC_RANGE_QP444_8BPC_MAX_NUM_BPP 37
+
+/* from BPP 6 to 30 in steps of 0.5 */
+#define RC_RANGE_QP444_10BPC_MAX_NUM_BPP 49
+
+/* from BPP 6 to 36 in steps of 0.5 */
+#define RC_RANGE_QP444_12BPC_MAX_NUM_BPP 61
+
+/*
+ * These QP tables are as per the C model; within each row the entries
+ * are indexed by bpp values that increment in steps of 0.5.
+ * We do not support fractional bpps as of today, hence we skip the
+ * fractional bpp entries when referencing the QP tables.
+ */
+static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
+ { 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
+ { 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4,
+ 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5,
+ 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 9, 8, 8,
+ 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3 }
+};
+
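
Because each row steps through bpp in 0.5 increments starting at 6, an integer bpp maps to column (bpp - 6) * 2 and the fractional columns are simply skipped, as the comment above notes. A small sketch of that lookup; the row data here is illustrative, not taken from the tables:

#include <stdio.h>

#define MIN_BPP 6

/* Column index for an integer bpp in a table whose columns advance
 * in 0.5 bpp steps from MIN_BPP: fractional columns are skipped. */
static int bpp_to_column(int bpp)
{
	return (bpp - MIN_BPP) * 2;
}

int main(void)
{
	/* Illustrative row only; the real tables are per buffer range
	 * and per bits-per-component. */
	static const unsigned char row[37] = { 4, 4, 4, 4, 4, 4, 3, 3, 3 };
	int bpp;

	for (bpp = 6; bpp <= 9; bpp++)
		printf("bpp %d -> column %d -> qp %u\n",
		       bpp, bpp_to_column(bpp), row[bpp_to_column(bpp)]);
	return 0;
}
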
+static const u8 rc_range_maxqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
+ { 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 6, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 3, 3, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 },
+ { 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1 },
+ { 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5,
+ 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
+ { 12, 11, 11, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
+ { 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6,
+ 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1 },
+ { 12, 12, 12, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 12, 12, 12, 12, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8,
+ 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 9,
+ 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4 }
+};
+
+static const u8 rc_range_minqp444_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_10BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 7, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3,
+ 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4,
+ 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5,
+ 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5,
+ 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5,
+ 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1,
+ 1, 0, 0 },
+ { 10, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 0 },
+ { 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6,
+ 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1,
+ 1, 1, 1 },
+ { 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8,
+ 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2,
+ 2, 1, 1, 1 },
+ { 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8,
+ 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2,
+ 2, 2, 2, 1 },
+ { 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4,
+ 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 18, 18, 17, 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 13, 13, 13,
+ 12, 12, 12, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7,
+ 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3 }
+};
+
+static const u8 rc_range_maxqp444_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_10BPC_MAX_NUM_BPP] = {
+ { 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 12, 11, 11, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 5, 5, 5, 4,
+ 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0 },
+ { 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 7, 7, 6,
+ 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 8, 8, 7,
+ 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 9, 8, 8,
+ 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,
+ 2, 2, 1, 1, 1, 1, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 8, 8,
+ 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 10, 9, 9,
+ 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3,
+ 3, 2, 2, 2, 1, 1, 1, 1 },
+ { 15, 15, 14, 14, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 11, 10, 10, 9,
+ 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 2, 2, 2, 2, 1, 1 },
+ { 16, 15, 15, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 11, 10,
+ 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4,
+ 4, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 16, 16, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 12, 12, 11, 11,
+ 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
+ 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 },
+ { 16, 16, 16, 15, 15, 15, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 16, 16, 16, 16, 15, 15, 15, 15, 15, 14, 14, 13, 13, 13, 12, 12, 12,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 3, 3, 2 },
+ { 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 14, 14, 14, 14, 13, 13, 12,
+ 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6,
+ 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2 },
+ { 19, 19, 18, 18, 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 14, 14, 14,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8,
+ 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4 }
+};
+
+static const u8 rc_range_minqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 11, 10, 10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3,
+ 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5,
+ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 7, 7, 7,
+ 7, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 9, 9, 8, 8,
+ 8, 8, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 9, 9, 9, 9,
+ 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3,
+ 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10,
+ 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4,
+ 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0 },
+ { 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5,
+ 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 0 },
+ { 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12,
+ 12, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1 },
+ { 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 12, 12, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8,
+ 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 12, 12, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8,
+ 7, 7, 7, 7, 6, 6, 6, 6, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8,
+ 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1 },
+ { 22, 22, 21, 21, 20, 20, 20, 20, 19, 19, 18, 18, 18, 18, 17, 17, 17, 16, 16,
+ 16, 15, 15, 15, 15, 14, 14, 13, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 11,
+ 10, 10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3 }
+};
+
+static const u8 rc_range_maxqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC_MAX_NUM_BPP] = {
+ { 12, 12, 12, 12, 12, 12, 11, 11, 11, 10, 9, 9, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4,
+ 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 11, 11, 9, 9, 9, 8, 8, 7, 7, 7, 7, 5,
+ 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 16, 15, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 10, 10, 9, 9,
+ 9, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 16, 16, 15, 15, 14, 14, 14, 14, 14, 14, 14, 14, 13, 13, 13, 12, 11, 11, 10,
+ 10, 10, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 2,
+ 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 12, 11, 10,
+ 10, 10, 10, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ { 17, 16, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 14, 13, 12, 12, 11,
+ 11, 11, 11, 9, 9, 9, 9, 8, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 17, 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 12, 11,
+ 11, 11, 11, 11, 10, 10, 10, 9, 9, 9, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5,
+ 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 18, 18, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 14, 13, 13, 12,
+ 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6,
+ 6, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1 },
+ { 19, 19, 18, 18, 17, 17, 17, 17, 17, 17, 16, 16, 16, 15, 15, 14, 14, 13, 13,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1 },
+ { 20, 19, 19, 18, 18, 18, 17, 17, 17, 17, 17, 17, 17, 16, 16, 15, 14, 14, 13,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7,
+ 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 1 },
+ { 20, 20, 19, 19, 18, 18, 18, 18, 18, 18, 17, 17, 17, 16, 16, 15, 15, 14, 14,
+ 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9,
+ 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2 },
+ { 20, 20, 20, 19, 19, 19, 18, 18, 18, 18, 17, 17, 17, 17, 16, 16, 16, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9,
+ 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 20, 20, 20, 20, 19, 19, 19, 19, 19, 18, 18, 17, 17, 17, 16, 16, 16, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9,
+ 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 21, 21, 21, 21, 20, 20, 19, 19, 19, 19, 18, 18, 18, 18, 17, 17, 16, 16, 16,
+ 16, 15, 15, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10,
+ 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2 },
+ { 23, 23, 22, 22, 21, 21, 21, 21, 20, 20, 19, 19, 19, 19, 18, 18, 18, 17, 17,
+ 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 12,
+ 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4 }
+};
+
+#define PARAM_TABLE(_minmax, _bpc, _row, _col) do { \
+ if (bpc == (_bpc)) \
+ return rc_range_##_minmax##qp444_##_bpc##bpc[_row][_col]; \
+} while (0)
+
+u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i)
+{
+ PARAM_TABLE(min, 8, buf_i, bpp_i);
+ PARAM_TABLE(min, 10, buf_i, bpp_i);
+ PARAM_TABLE(min, 12, buf_i, bpp_i);
+
+ MISSING_CASE(bpc);
+ return 0;
+}
+
+u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i)
+{
+ PARAM_TABLE(max, 8, buf_i, bpp_i);
+ PARAM_TABLE(max, 10, buf_i, bpp_i);
+ PARAM_TABLE(max, 12, buf_i, bpp_i);
+
+ MISSING_CASE(bpc);
+ return 0;
+}
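
The PARAM_TABLE() macro above compiles each call down to a guarded table read, returning early for the matching bpc. As a minimal sketch of how a caller drives these lookups (mirroring the loop in calculate_rc_params() in intel_vdsc.c further below; fill_range_qp() itself is a hypothetical helper, not part of the patch):

	static void fill_range_qp(struct rc_parameters *rc, int bpc, int bpp)
	{
		int bpp_i = 2 * (bpp - 6);	/* table column for this bpp */
		int buf_i;

		for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
			rc->rc_range_params[buf_i].range_min_qp =
				intel_lookup_range_min_qp(bpc, buf_i, bpp_i);
			rc->rc_range_params[buf_i].range_max_qp =
				intel_lookup_range_max_qp(bpc, buf_i, bpp_i);
		}
	}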
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.h b/drivers/gpu/drm/i915/display/intel_qp_tables.h
new file mode 100644
index 000000000000..9fb3c36bd7c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_QP_TABLES_H_
+#define _INTEL_QP_TABLES_H_
+
+#include <linux/types.h>
+
+u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i);
+u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index f770d6bcd2c9..e4f91d7a5c60 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -38,6 +38,8 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index acbf4e63b245..4ae9a7455b23 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -43,6 +43,7 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_atomic_plane.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 71b8edafb1c3..c23c210a55f5 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -26,9 +26,7 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-
- if (IS_DISPLAY_VER(i915, 11))
+ if (intel_tc_cold_requires_aux_pw(dig_port))
return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
else
return POWER_DOMAIN_TC_COLD_OFF;
@@ -40,7 +38,7 @@ tc_cold_block(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum intel_display_power_domain domain;
- if (IS_DISPLAY_VER(i915, 11) && !dig_port->tc_legacy_port)
+ if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
return 0;
domain = tc_cold_get_power_domain(dig_port);
@@ -71,7 +69,7 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool enabled;
- if (IS_DISPLAY_VER(i915, 11) && !dig_port->tc_legacy_port)
+ if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
return;
enabled = intel_display_power_is_enabled(i915,
@@ -205,7 +203,7 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
-static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
@@ -238,6 +236,40 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
return mask;
}
+static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val, mask = 0;
+
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
+ if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
+ mask |= BIT(TC_PORT_DP_ALT);
+ if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
+ mask |= BIT(TC_PORT_TBT_ALT);
+
+ if (intel_uncore_read(uncore, SDEISR) & isr_bit)
+ mask |= BIT(TC_PORT_LEGACY);
+
+ /* The sink can be connected only in a single mode. */
+ if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
+ tc_port_fixup_legacy_flag(dig_port, mask);
+
+ return mask;
+}
+
+static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_port_live_status_mask(dig_port);
+
+ return icl_tc_port_live_status_mask(dig_port);
+}
+
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -256,8 +288,35 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
-static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
- bool enable)
+static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
+ if (val == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
+ return false;
+ }
+
+ return val & TCSS_DDI_STATUS_READY;
+}
+
+static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_status_complete(dig_port);
+
+ return icl_tc_phy_status_complete(dig_port);
+}
+
+static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
+ bool take)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
@@ -267,20 +326,20 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
- "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
- dig_port->tc_port_name, enableddisabled(enable));
+ "Port %s: PHY in TCCOLD, can't %s ownership\n",
+ dig_port->tc_port_name, take ? "take" : "release");
return false;
}
val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
- if (!enable)
+ if (take)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
- if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
+ if (!take && wait_for(!tc_phy_status_complete(dig_port), 10))
drm_dbg_kms(&i915->drm,
"Port %s: PHY complete clear timed out\n",
dig_port->tc_port_name);
@@ -288,7 +347,35 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
return true;
}
-static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
+static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
+ bool take)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum port port = dig_port->base.port;
+ u32 val;
+
+ val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
+ if (take)
+ val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ else
+ val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ intel_uncore_write(uncore, DDI_BUF_CTL(port), val);
+
+ return true;
+}
+
+static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_take_ownership(dig_port, take);
+
+ return icl_tc_phy_take_ownership(dig_port, take);
+}
+
+static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
@@ -303,7 +390,28 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
return true;
}
- return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
+ return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+}
+
+static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum port port = dig_port->base.port;
+ u32 val;
+
+ val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
+ return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+}
+
+static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_is_owned(dig_port);
+
+ return icl_tc_phy_is_owned(dig_port);
}
/*
@@ -323,13 +431,13 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int max_lanes;
- if (!icl_tc_phy_status_complete(dig_port)) {
+ if (!tc_phy_status_complete(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
}
- if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
+ if (!tc_phy_take_ownership(dig_port, true) &&
!drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
goto out_set_tbt_alt_mode;
@@ -348,7 +456,7 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
dig_port->tc_port_name);
- goto out_set_safe_mode;
+ goto out_release_phy;
}
if (max_lanes < required_lanes) {
@@ -356,15 +464,15 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
"Port %s: PHY max lanes %d < required lanes %d\n",
dig_port->tc_port_name,
max_lanes, required_lanes);
- goto out_set_safe_mode;
+ goto out_release_phy;
}
dig_port->tc_mode = TC_PORT_DP_ALT;
return;
-out_set_safe_mode:
- icl_tc_phy_set_safe_mode(dig_port, true);
+out_release_phy:
+ tc_phy_take_ownership(dig_port, false);
out_set_tbt_alt_mode:
dig_port->tc_mode = TC_PORT_TBT_ALT;
}
@@ -380,7 +488,7 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
/* Nothing to do, we never disconnect from legacy mode */
break;
case TC_PORT_DP_ALT:
- icl_tc_phy_set_safe_mode(dig_port, true);
+ tc_phy_take_ownership(dig_port, false);
dig_port->tc_mode = TC_PORT_TBT_ALT;
break;
case TC_PORT_TBT_ALT:
@@ -395,14 +503,14 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- if (!icl_tc_phy_status_complete(dig_port)) {
+ if (!tc_phy_status_complete(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
dig_port->tc_port_name);
return dig_port->tc_mode == TC_PORT_TBT_ALT;
}
- if (icl_tc_phy_is_in_safe_mode(dig_port)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
+ if (!tc_phy_is_owned(dig_port)) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
dig_port->tc_port_name);
return false;
@@ -417,11 +525,10 @@ intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 live_status_mask = tc_port_live_status_mask(dig_port);
- bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
enum tc_port_mode mode;
- if (in_safe_mode ||
- drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port)))
+ if (!tc_phy_is_owned(dig_port) ||
+ drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
return TC_PORT_TBT_ALT;
mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
@@ -443,7 +550,7 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
if (live_status_mask)
return fls(live_status_mask) - 1;
- return icl_tc_phy_status_complete(dig_port) &&
+ return tc_phy_status_complete(dig_port) &&
dig_port->tc_legacy_port ? TC_PORT_LEGACY :
TC_PORT_TBT_ALT;
}
@@ -455,7 +562,7 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
- if (DISPLAY_VER(i915) != 11 || !dig_port->tc_legacy_port) {
+ if (!intel_tc_cold_requires_aux_pw(dig_port)) {
enum intel_display_power_domain aux_domain;
bool aux_powered;
@@ -625,9 +732,11 @@ tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig
if (!INTEL_INFO(i915)->display.has_modular_fia)
return false;
+ mutex_lock(&dig_port->tc_lock);
wakeref = tc_cold_block(dig_port);
val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
tc_cold_unblock(dig_port, wakeref);
+ mutex_unlock(&dig_port->tc_lock);
drm_WARN_ON(&i915->drm, val == 0xffffffff);
@@ -670,3 +779,11 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc_link_refcount = 0;
tc_port_load_fia_params(i915, dig_port);
}
+
+bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
+ IS_ALDERLAKE_P(i915);
+}
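
The new intel_tc_cold_requires_aux_pw() centralizes the decision that tc_cold_get_power_domain() and intel_tc_port_reset_mode() previously open-coded: ICL legacy ports, and all TC ports on ADL-P, need the AUX power well to keep TCCOLD blocked. A condensed sketch of the caller pattern, taken from the tc_has_modular_fia() hunk above (simplified, error handling omitted):

	intel_wakeref_t wakeref;
	u32 val;

	mutex_lock(&dig_port->tc_lock);
	wakeref = tc_cold_block(dig_port);	/* AUX or TC_COLD_OFF domain */
	val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
	tc_cold_unblock(dig_port, wakeref);
	mutex_unlock(&dig_port->tc_lock);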
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index b619e4736f85..0eacbd76ec15 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -29,4 +29,6 @@ bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port);
+
#endif /* __INTEL_TC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index e558f121ec4e..aa52af7891f0 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -36,6 +36,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_tv.h"
@@ -1165,7 +1166,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv,
int hdisplay)
{
- return IS_DISPLAY_VER(dev_priv, 3) && hdisplay > 1024;
+ return DISPLAY_VER(dev_priv) == 3 && hdisplay > 1024;
}
static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode,
@@ -1306,7 +1307,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
* the active portion. Hence following this formula seems
* more trouble than it's worth.
*
- * if (IS_GEN(dev_priv, 4)) {
+ * if (GRAPHICS_VER(dev_priv) == 4) {
* num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
* den = tv_mode->clock;
* } else {
@@ -1789,7 +1790,7 @@ intel_tv_get_modes(struct drm_connector *connector)
continue;
/* no vertical scaling with wide sources on gen3 */
- if (IS_DISPLAY_VER(dev_priv, 3) && input->w > 1024 &&
+ if (DISPLAY_VER(dev_priv) == 3 && input->w > 1024 &&
input->h > intel_tv_mode_vdisplay(tv_mode))
continue;
@@ -1978,7 +1979,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
/* 1080p50/1080p60 not supported on gen3 */
- if (IS_DISPLAY_VER(dev_priv, 3) &&
+ if (DISPLAY_VER(dev_priv) == 3 &&
tv_modes[i].oversample == 1)
break;
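
The hunks in this file are mechanical version-check conversions; their semantics, as inferred from the replacements in this series (the two-argument IS_DISPLAY_VER() form is now a range check, replacing IS_DISPLAY_RANGE(); see skl_universal_plane.c below):

	if (DISPLAY_VER(dev_priv) == 3)		/* was IS_DISPLAY_VER(dev_priv, 3) */
		...
	if (IS_DISPLAY_VER(dev_priv, 9, 10))	/* was IS_DISPLAY_RANGE(dev_priv, 9, 10) */
		...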
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3a21c65ffa85..85749370508c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -5,11 +5,13 @@
* Author: Gaurav K Singh <gaurav.k.singh@intel.com>
* Manasi Navare <manasi.d.navare@intel.com>
*/
-
+#include <linux/limits.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_vdsc.h"
+#include "intel_qp_tables.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
@@ -372,12 +374,81 @@ static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
return true;
}
+static void
+calculate_rc_params(struct rc_parameters *rc,
+ struct drm_dsc_config *vdsc_cfg)
+{
+ int bpc = vdsc_cfg->bits_per_component;
+ int bpp = vdsc_cfg->bits_per_pixel >> 4;
+ int ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 };
+ int ofs_und8[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 };
+ int ofs_und12[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 };
+ int ofs_und15[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 };
+ int qp_bpc_modifier = (bpc - 8) * 2;
+ u32 res, buf_i, bpp_i;
+
+ if (vdsc_cfg->slice_height >= 8)
+ rc->first_line_bpg_offset =
+ 12 + DIV_ROUND_UP((9 * min(34, vdsc_cfg->slice_height - 8)), 100);
+ else
+ rc->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1);
+
+ /* Our hw supports only 444 modes as of today */
+ if (bpp >= 12)
+ rc->initial_offset = 2048;
+ else if (bpp >= 10)
+ rc->initial_offset = 5632 - DIV_ROUND_UP(((bpp - 10) * 3584), 2);
+ else if (bpp >= 8)
+ rc->initial_offset = 6144 - DIV_ROUND_UP(((bpp - 8) * 512), 2);
+ else
+ rc->initial_offset = 6144;
+
+ /* initial_xmit_delay = rc_model_size/2/compression_bpp */
+ rc->initial_xmit_delay = DIV_ROUND_UP(DSC_RC_MODEL_SIZE_CONST, 2 * bpp);
+
+ rc->flatness_min_qp = 3 + qp_bpc_modifier;
+ rc->flatness_max_qp = 12 + qp_bpc_modifier;
+
+ rc->rc_quant_incr_limit0 = 11 + qp_bpc_modifier;
+ rc->rc_quant_incr_limit1 = 11 + qp_bpc_modifier;
+
+ bpp_i = (2 * (bpp - 6));
+ for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
+ /* Read range_min_qp and range_max_qp from the qp tables */
+ rc->rc_range_params[buf_i].range_min_qp =
+ intel_lookup_range_min_qp(bpc, buf_i, bpp_i);
+ rc->rc_range_params[buf_i].range_max_qp =
+ intel_lookup_range_max_qp(bpc, buf_i, bpp_i);
+
+ /* Calculate range_bpg_offset */
+ if (bpp <= 6) {
+ rc->rc_range_params[buf_i].range_bpg_offset = ofs_und6[buf_i];
+ } else if (bpp <= 8) {
+ res = DIV_ROUND_UP(((bpp - 6) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und6[buf_i] + res;
+ } else if (bpp <= 12) {
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und8[buf_i];
+ } else if (bpp <= 15) {
+ res = DIV_ROUND_UP(((bpp - 12) * (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und12[buf_i] + res;
+ } else {
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und15[buf_i];
+ }
+ }
+}
+
int intel_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
const struct rc_parameters *rc_params;
+ struct rc_parameters *rc = NULL;
u8 i = 0;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
@@ -412,9 +483,24 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
vdsc_cfg->rc_buf_thresh[13] = 0x7D;
}
- rc_params = get_rc_params(compressed_bpp, vdsc_cfg->bits_per_component);
- if (!rc_params)
- return -EINVAL;
+ /*
+ * From XE_LPD onwards we support compression bpps in steps of 1
+ * up to uncompressed bpp-1, hence calculate all the rc
+ * parameters
+ */
+ if (DISPLAY_VER(dev_priv) >= 13) {
+ rc = kmalloc(sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ calculate_rc_params(rc, vdsc_cfg);
+ rc_params = rc;
+ } else {
+ rc_params = get_rc_params(compressed_bpp,
+ vdsc_cfg->bits_per_component);
+ if (!rc_params)
+ return -EINVAL;
+ }
vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset;
vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay;
@@ -440,20 +526,20 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
/*
* BitsPerComponent value determines mux_word_size:
- * When BitsPerComponent is 12bpc, muxWordSize will be equal to 64 bits
- * When BitsPerComponent is 8 or 10bpc, muxWordSize will be equal to
- * 48 bits
+ * When BitsPerComponent is 10bpc or less, muxWordSize will be equal to
+ * 48 bits, otherwise 64 bits
*/
- if (vdsc_cfg->bits_per_component == 8 ||
- vdsc_cfg->bits_per_component == 10)
+ if (vdsc_cfg->bits_per_component <= 10)
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
- else if (vdsc_cfg->bits_per_component == 12)
+ else
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
+ kfree(rc);
+
return 0;
}
@@ -469,13 +555,13 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
* POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
*
* - ICL eDP/DSI transcoder
- * - Gen12+ (except RKL) pipe A
+ * - Display version 12 (except RKL) pipe A
*
* For any other pipe, VDSC/joining uses the power well associated with
* the pipe in use. Hence another reference on the pipe power domain
* will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (DISPLAY_VER(i915) >= 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
+ if (DISPLAY_VER(i915) == 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else if (is_pipe_dsc(crtc_state))
return POWER_DOMAIN_PIPE(pipe);
@@ -1020,6 +1106,43 @@ static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
}
+static struct intel_crtc *
+_get_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe)
+{
+ if (!intel_pipe_valid(i915, pipe))
+ return NULL;
+
+ return intel_get_crtc_for_pipe(i915, pipe);
+}
+
+struct intel_crtc *
+intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc)
+{
+ return _get_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1);
+}
+
+static struct intel_crtc *
+intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc)
+{
+ return _get_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1);
+}
+
+void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dss_ctl1_val = 0;
+
+ if (crtc_state->bigjoiner && !crtc_state->dsc.compression_enable) {
+ if (crtc_state->bigjoiner_slave)
+ dss_ctl1_val |= UNCOMPRESSED_JOINER_SLAVE;
+ else
+ dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER;
+
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc_state), dss_ctl1_val);
+ }
+}
+
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -1059,11 +1182,31 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!old_crtc_state->dsc.compression_enable)
- return;
+ /* Disable only if DSC compression or bigjoiner was enabled */
+ if (old_crtc_state->dsc.compression_enable ||
+ old_crtc_state->bigjoiner) {
+ intel_de_write(dev_priv, dss_ctl1_reg(old_crtc_state), 0);
+ intel_de_write(dev_priv, dss_ctl2_reg(old_crtc_state), 0);
+ }
+}
+
+void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dss_ctl1;
- intel_de_write(dev_priv, dss_ctl1_reg(old_crtc_state), 0);
- intel_de_write(dev_priv, dss_ctl2_reg(old_crtc_state), 0);
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state));
+ if (dss_ctl1 & UNCOMPRESSED_JOINER_MASTER) {
+ crtc_state->bigjoiner = true;
+ crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
+ } else if (dss_ctl1 & UNCOMPRESSED_JOINER_SLAVE) {
+ crtc_state->bigjoiner = true;
+ crtc_state->bigjoiner_slave = true;
+ crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
+ }
}
void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
@@ -1100,14 +1243,11 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
if (!(dss_ctl1 & MASTER_BIG_JOINER_ENABLE)) {
crtc_state->bigjoiner_slave = true;
- if (!WARN_ON(crtc->pipe == PIPE_A))
- crtc_state->bigjoiner_linked_crtc =
- intel_get_crtc_for_pipe(dev_priv, crtc->pipe - 1);
+ crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc);
} else {
- if (!WARN_ON(INTEL_NUM_PIPES(dev_priv) == crtc->pipe + 1))
- crtc_state->bigjoiner_linked_crtc =
- intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
+ crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
}
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc);
}
/* FIXME: add more state readout as needed */
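
calculate_rc_params() treats bits_per_pixel as fixed point with 4 fractional bits (hence the >> 4) and linearly interpolates range_bpg_offset between the ofs_und* anchor arrays. A worked check under that fixed-point assumption:

	/* compressed_bpp = 8: vdsc_cfg->bits_per_pixel = 8 << 4 = 128,
	 * so bpp = 128 >> 4 = 8 and bpp_i = 2 * (8 - 6) = 4 (table column).
	 *
	 * For 6 < bpp <= 8 the offset interpolates between ofs_und6 and
	 * ofs_und8, e.g. buf_i = 0 at bpp = 7:
	 *   ofs_und6[0] + DIV_ROUND_UP((7 - 6) * (ofs_und8[0] - ofs_und6[0]), 2)
	 *   = 0 + DIV_ROUND_UP(2, 2) = 1
	 */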
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 65d301c23580..dfb1fd38deb4 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -12,13 +12,16 @@ struct intel_encoder;
struct intel_crtc_state;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
+void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int intel_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
+void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index a9c2b2fd9252..c335b1dbafcf 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -5,6 +5,7 @@
*/
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vrr.h"
@@ -67,7 +68,10 @@ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_stat
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* The hw imposes the extra scanline before frame start */
- return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
+ if (DISPLAY_VER(i915) >= 13)
+ return crtc_state->vrr.guardband + i915->framestart_delay + 1;
+ else
+ return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
}
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
@@ -85,6 +89,8 @@ void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -123,17 +129,26 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
/*
- * FIXME: s/4/framestart_delay+1/ to get consistent
- * earliest/latest points for register latching regardless
- * of the framestart_delay used?
- *
- * FIXME: this really needs the extra scanline to provide consistent
- * behaviour for all framestart_delay values. Otherwise with
- * framestart_delay==3 we will end up extending the min vblank by
- * one extra line.
+ * For XE_LPD+, the guardband is used and the pipeline
+ * override is deprecated.
*/
- crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+ if (DISPLAY_VER(i915) >= 13)
+ crtc_state->vrr.guardband =
+ crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
+ i915->window2_delay;
+ else
+ /*
+ * FIXME: s/4/framestart_delay+1/ to get consistent
+ * earliest/latest points for register latching regardless
+ * of the framestart_delay used?
+ *
+ * FIXME: this really needs the extra scanline to provide consistent
+ * behaviour for all framestart_delay values. Otherwise with
+ * framestart_delay==3 we will end up extending the min vblank by
+ * one extra line.
+ */
+ crtc_state->vrr.pipeline_full =
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
@@ -148,10 +163,15 @@ void intel_vrr_enable(struct intel_encoder *encoder,
if (!crtc_state->vrr.enable)
return;
- trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
- VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
- VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
- VRR_CTL_PIPELINE_FULL_OVERRIDE;
+ if (DISPLAY_VER(dev_priv) >= 13)
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
@@ -198,8 +218,13 @@ void intel_vrr_get_config(struct intel_crtc *crtc,
if (!crtc_state->vrr.enable)
return;
- if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
- crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ if (DISPLAY_VER(dev_priv) >= 13)
+ crtc_state->vrr.guardband =
+ REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
+ else
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ crtc_state->vrr.pipeline_full =
+ REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
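
On XE_LPD the vblank exit length computed above becomes guardband + framestart_delay + 1 instead of pipeline_full + framestart_delay + 1. A worked example with assumed mode numbers:

	/* Assume vmin = 1125, crtc_vdisplay = 1080, window2_delay = 0:
	 *   guardband = 1125 - 1080 - 0 = 45 scanlines
	 * With framestart_delay = 1 the hw then needs
	 *   45 + 1 + 1 = 47 scanlines of vblank exit.
	 */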
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 17a98cb627df..394b7bbf48d8 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -2,6 +2,7 @@
/*
* Copyright © 2020 Intel Corporation
*/
+#include "intel_de.h"
#include "intel_display_types.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 7ffd7b570b54..92a4fd508e92 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_pm.h"
@@ -198,6 +199,13 @@ static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
+static const u64 adlp_step_a_plane_format_modifiers[] = {
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
switch (format) {
@@ -267,7 +275,7 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
{
- if (HAS_D12_PLANE_MINIMIZATION(i915))
+ if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
else
return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
@@ -286,40 +294,64 @@ bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
icl_hdr_plane_mask() & BIT(plane_id);
}
+static int icl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
+ /* two pixels per clock */
+ return DIV_ROUND_UP(pixel_rate, 2);
+}
+
static void
-skl_plane_ratio(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
+glk_plane_ratio(const struct intel_plane_state *plane_state,
unsigned int *num, unsigned int *den)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
if (fb->format->cpp[0] == 8) {
- if (DISPLAY_VER(dev_priv) >= 10) {
- *num = 10;
- *den = 8;
- } else {
- *num = 9;
- *den = 8;
- }
+ *num = 10;
+ *den = 8;
} else {
*num = 1;
*den = 1;
}
}
-static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+static int glk_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- unsigned int num, den;
unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+ unsigned int num, den;
- skl_plane_ratio(crtc_state, plane_state, &num, &den);
+ glk_plane_ratio(plane_state, &num, &den);
- /* two pixels per clock on glk+ */
- if (DISPLAY_VER(dev_priv) >= 10)
- den *= 2;
+ /* two pixels per clock */
+ return DIV_ROUND_UP(pixel_rate * num, 2 * den);
+}
+
+static void
+skl_plane_ratio(const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ *num = 9;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+ unsigned int num, den;
+
+ skl_plane_ratio(plane_state, &num, &den);
return DIV_ROUND_UP(pixel_rate * num, den);
}
@@ -457,17 +489,35 @@ skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
+ int max_horizontal_pixels = 8192;
+ int max_stride_bytes;
+
+ if (DISPLAY_VER(i915) >= 13) {
+ /*
+ * The stride in bytes must not exceed 128K bytes. For
+ * 64bpp pixel formats this allows a 16K pixel wide
+ * surface.
+ */
+ max_stride_bytes = 131072;
+ if (cpp == 8)
+ max_horizontal_pixels = 16384;
+ else
+ max_horizontal_pixels = 65536;
+ } else {
+ /*
+ * "The stride in bytes must not exceed the
+ * size of 8K pixels and 32K bytes."
+ */
+ max_stride_bytes = 32768;
+ }
- /*
- * "The stride in bytes must not exceed the
- * of the size of 8K pixels and 32K bytes."
- */
if (drm_rotation_90_or_270(rotation))
- return min(8192, 32768 / cpp);
+ return min(max_horizontal_pixels, max_stride_bytes / cpp);
else
- return min(8192 * cpp, 32768);
+ return min(max_horizontal_pixels * cpp, max_stride_bytes);
}
@@ -800,6 +850,29 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
return 0;
}
+static u32 adlp_plane_ctl_arb_slots(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ switch (fb->format->cpp[0]) {
+ case 2:
+ return PLANE_CTL_ARB_SLOTS(1);
+ default:
+ return PLANE_CTL_ARB_SLOTS(0);
+ }
+ } else {
+ switch (fb->format->cpp[0]) {
+ case 8:
+ return PLANE_CTL_ARB_SLOTS(3);
+ case 4:
+ return PLANE_CTL_ARB_SLOTS(1);
+ default:
+ return PLANE_CTL_ARB_SLOTS(0);
+ }
+ }
+}
+
static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -829,7 +902,7 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
- if (DISPLAY_VER(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) < 10) {
plane_ctl |= skl_plane_ctl_alpha(plane_state);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
@@ -853,6 +926,10 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
else if (key->flags & I915_SET_COLORKEY_SOURCE)
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+ /* Wa_22012358565:adlp */
+ if (DISPLAY_VER(dev_priv) == 13)
+ plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
+
return plane_ctl;
}
@@ -909,6 +986,21 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
return plane_color_ctl;
}
+static u32 skl_surf_address(const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ u32 offset = plane_state->view.color_plane[color_plane].offset;
+
+ if (intel_fb_uses_dpt(fb)) {
+ WARN_ON(offset & 0x1fffff);
+ return offset >> 9;
+ } else {
+ WARN_ON(offset & 0xfff);
+ return offset;
+ }
+}
+
static void
skl_program_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -919,7 +1011,7 @@ skl_program_plane(struct intel_plane *plane,
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = plane_state->view.color_plane[color_plane].offset;
+ u32 surf_addr = skl_surf_address(plane_state, color_plane);
u32 stride = skl_plane_stride(plane_state, color_plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int aux_plane = skl_main_to_aux_plane(fb, color_plane);
@@ -958,7 +1050,7 @@ skl_program_plane(struct intel_plane *plane,
}
if (aux_plane) {
- aux_dist = plane_state->view.color_plane[aux_plane].offset - surf_addr;
+ aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr;
if (DISPLAY_VER(dev_priv) < 12)
aux_dist |= skl_plane_stride(plane_state, aux_plane);
@@ -1008,6 +1100,14 @@ skl_program_plane(struct intel_plane *plane,
intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
/*
+ * Enable the scaler before the plane so that we don't
+ * get a catastrophic underrun even if the two operations
+ * end up happening in two different frames.
+ */
+ if (plane_state->scaler_id >= 0)
+ skl_program_plane_scaler(plane, crtc_state, plane_state);
+
+ /*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
@@ -1016,9 +1116,6 @@ skl_program_plane(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- if (plane_state->scaler_id >= 0)
- skl_program_plane_scaler(plane, crtc_state, plane_state);
-
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1082,7 +1179,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
- struct drm_format_name_buf format_name;
if (!fb)
return 0;
@@ -1103,8 +1199,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
if (drm_rotation_90_or_270(rotation)) {
- if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
+ if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) {
drm_dbg_kms(&dev_priv->drm,
"Y/Yf tiling required for 90/270!\n");
return -EINVAL;
@@ -1130,9 +1225,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
drm_dbg_kms(&dev_priv->drm,
- "Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ "Unsupported pixel format %p4cc for 90/270!\n",
+ &fb->format->format);
return -EINVAL;
default:
break;
@@ -1184,7 +1278,7 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
* than the cursor ending less than 4 pixels from the left edge of the
* screen may cause FIFO underflow and display corruption.
*/
- if (IS_DISPLAY_VER(dev_priv, 10) &&
+ if (DISPLAY_VER(dev_priv) == 10 &&
(crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
drm_dbg_kms(&dev_priv->drm,
"requested plane X %s position %d invalid (valid range %d-%d)\n",
@@ -1412,7 +1506,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
}
}
- drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+ if (DISPLAY_VER(dev_priv) >= 13)
+ drm_WARN_ON(&dev_priv->drm, x > 65535 || y > 65535);
+ else
+ drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
plane_state->view.color_plane[0].offset = offset;
plane_state->view.color_plane[0].x = x;
@@ -1486,7 +1583,10 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
}
}
- drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+ if (DISPLAY_VER(i915) >= 13)
+ drm_WARN_ON(&i915->drm, x > 65535 || y > 65535);
+ else
+ drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
plane_state->view.color_plane[uv_plane].offset = offset;
plane_state->view.color_plane[uv_plane].x = x;
@@ -1671,7 +1771,7 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
return false;
- if (IS_DISPLAY_VER(dev_priv, 9) && pipe == PIPE_C)
+ if (DISPLAY_VER(dev_priv) == 9 && pipe == PIPE_C)
return false;
if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
@@ -1813,6 +1913,10 @@ static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
return false;
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+ return false;
+
return plane_id < PLANE_SPRITE4;
}
@@ -1830,8 +1934,12 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
+ break;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+ return false;
break;
default:
return false;
@@ -1886,7 +1994,10 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
- if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+ return adlp_step_a_plane_format_modifiers;
+ else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
return gen12_plane_format_modifiers_mc_ccs;
else
return gen12_plane_format_modifiers_rc_ccs;
@@ -1965,12 +2076,15 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->min_width = icl_plane_min_width;
plane->max_width = icl_plane_max_width;
plane->max_height = icl_plane_max_height;
+ plane->min_cdclk = icl_plane_min_cdclk;
} else if (DISPLAY_VER(dev_priv) >= 10) {
plane->max_width = glk_plane_max_width;
plane->max_height = skl_plane_max_height;
+ plane->min_cdclk = glk_plane_min_cdclk;
} else {
plane->max_width = skl_plane_max_width;
plane->max_height = skl_plane_max_height;
+ plane->min_cdclk = skl_plane_min_cdclk;
}
plane->max_stride = skl_plane_max_stride;
@@ -1978,11 +2092,10 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = skl_disable_plane;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
- plane->min_cdclk = skl_plane_min_cdclk;
if (plane_id == PLANE_PRIMARY) {
- plane->need_async_flip_disable_wa = IS_DISPLAY_RANGE(dev_priv,
- 9, 10);
+ plane->need_async_flip_disable_wa = IS_DISPLAY_VER(dev_priv,
+ 9, 10);
plane->async_flip = skl_plane_async_flip;
plane->enable_flip_done = skl_plane_enable_flip_done;
plane->disable_flip_done = skl_plane_disable_flip_done;
@@ -2024,9 +2137,12 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
if (ret)
goto fail;
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+ if (DISPLAY_VER(dev_priv) >= 13)
+ supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+ else
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
supported_rotations |= DRM_MODE_REFLECT_X;
@@ -2197,7 +2313,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
- fb->pitches[0] = (val & 0x3ff) * stride_mult;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ fb->pitches[0] = (val & PLANE_STRIDE_MASK_XELPD) * stride_mult;
+ else
+ fb->pitches[0] = (val & PLANE_STRIDE_MASK) * stride_mult;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
@@ -2215,4 +2335,3 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
error:
kfree(intel_fb);
}
-
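
The new XE_LPD limits in skl_plane_max_stride() can be checked against the "64bpp allows a 16K pixel surface" comment:

	/* DISPLAY_VER >= 13, values from the hunk above:
	 *   cpp = 8, unrotated: min(16384 * 8, 131072) = 131072 bytes (16K px)
	 *   cpp = 4, unrotated: min(65536 * 4, 131072) = 131072 bytes (32K px)
	 *   cpp = 8, 90/270:    min(16384, 131072 / 8) = 16384 pixels
	 */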
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 74a27508759d..084c9c43b2ed 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -33,6 +33,8 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
@@ -297,7 +299,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->pipe_bpp = 18;
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
/* Enable Frame time stamp based scanline reporting */
pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -522,7 +524,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
if (IS_GEMINILAKE(dev_priv))
glk_dsi_device_ready(encoder);
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
else
vlv_dsi_device_ready(encoder);
@@ -601,7 +603,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_dbg_kms(&dev_priv->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
@@ -621,7 +623,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) &&
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(&dev_priv->drm, "DSI LP not going Low\n");
@@ -646,7 +648,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp;
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports) {
temp = intel_de_read(dev_priv,
MIPI_CTRL(port));
@@ -666,7 +668,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -703,7 +705,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -714,6 +716,19 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
}
}
+static void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi)
+{
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time,
+ intel_dsi->panel_power_off_time);
+
+ if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay)
+ msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration);
+}
+
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
@@ -775,13 +790,15 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
drm_dbg_kms(&dev_priv->drm, "\n");
+ intel_dsi_wait_panel_power_cycle(intel_dsi);
+
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/*
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_dsi_pll_disable(encoder);
bxt_dsi_pll_enable(encoder, pipe_config);
} else {
@@ -846,8 +863,10 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
/* Send initialization commands in LP mode */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
- /* Enable port in pre-enable phase itself because as per hw team
- * recommendation, port should be enabled befor plane & pipe */
+ /*
+ * Enable port in pre-enable phase itself because as per hw team
+ * recommendation, port should be enabled before plane & pipe
+ */
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
intel_de_write(dev_priv,
@@ -932,7 +951,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
drm_dbg_kms(&dev_priv->drm, "\n");
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
intel_crtc_vblank_off(old_crtc_state);
skl_scaler_disable(old_crtc_state);
@@ -971,7 +990,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
val & ~MIPIO_RST_CTRL);
}
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_dsi_pll_disable(encoder);
} else {
u32 val;
@@ -989,18 +1008,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
- /*
- * FIXME As we do with eDP, just make a note of the time here
- * and perform the wait before the next panel power on.
- */
- msleep(intel_dsi->panel_pwr_cycle_delay);
+ intel_dsi->panel_power_off_time = ktime_get_boottime();
}
static void intel_dsi_shutdown(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- msleep(intel_dsi->panel_pwr_cycle_delay);
+ intel_dsi_wait_panel_power_cycle(intel_dsi);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -1024,12 +1039,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv))
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ !bxt_dsi_pll_is_enabled(dev_priv))
goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
+ i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
@@ -1055,7 +1071,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
continue;
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -1251,7 +1267,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = bxt_dsi_get_pclk(encoder, pipe_config);
} else {
@@ -1317,7 +1333,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1407,7 +1423,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
intel_de_write(dev_priv, MIPI_CTRL(port),
tmp | READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_GEN9_LP(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
@@ -1445,7 +1461,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi))
tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1492,7 +1508,8 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ !intel_dsi->dual_link) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
@@ -1570,7 +1587,7 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
/* Panel commands can be sent when clock is in LP11 */
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
@@ -1828,7 +1845,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
else
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
@@ -1854,7 +1871,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
intel_encoder->enable = bxt_dsi_enable;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
@@ -1874,13 +1891,15 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
intel_encoder->pipe_mask = ~0;
else if (port == PORT_A)
intel_encoder->pipe_mask = BIT(PIPE_A);
else
intel_encoder->pipe_mask = BIT(PIPE_B);
+ intel_dsi->panel_power_off_time = ktime_get_boottime();
+
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 4070b00c3690..90185b219447 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
index 9e508e7d4629..7df91b7e4ca8 100644
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -10,7 +10,7 @@
void dma_resv_prune(struct dma_resv *resv)
{
if (dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
dma_resv_add_excl_fence(resv, NULL);
dma_resv_unlock(resv);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 25235ef630c1..6234e17259c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !dma_resv_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
@@ -113,11 +113,10 @@ retry:
seq = raw_read_seqcount(&obj->base.resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy =
- busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+ args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
/* Translate shared fences to READ set of engines */
- list = rcu_dereference(obj->base.resv->fence);
+ list = dma_resv_shared_list(obj->base.resv);
if (list) {
unsigned int shared_count = list->shared_count, i;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index fd8ee52e17a4..7720b8c22c81 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1046,7 +1046,6 @@ struct context_barrier_task {
void *data;
};
-__i915_active_call
static void cb_retire(struct i915_active *base)
{
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@@ -1080,7 +1079,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (!cb)
return -ENOMEM;
- i915_active_init(&cb->base, NULL, cb_retire);
+ i915_active_init(&cb->base, NULL, cb_retire, 0);
err = i915_active_acquire(&cb->base);
if (err) {
kfree(cb);
@@ -1191,7 +1190,7 @@ static void set_ppgtt_barrier(void *data)
{
struct i915_address_space *old = data;
- if (INTEL_GEN(old->i915) < 8)
+ if (GRAPHICS_VER(old->i915) < 8)
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
i915_vm_close(old);
@@ -1437,7 +1436,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
context->max_eus_per_subslice = user->max_eus_per_subslice;
/* Part specific restrictions. */
- if (IS_GEN(i915, 11)) {
+ if (GRAPHICS_VER(i915) == 11) {
unsigned int hw_s = hweight8(device->slice_mask);
unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
unsigned int req_s = hweight8(context->slice_mask);
@@ -1504,7 +1503,7 @@ static int set_sseu(struct i915_gem_context *ctx,
if (args->size < sizeof(user_sseu))
return -EINVAL;
- if (!IS_GEN(i915, 11))
+ if (GRAPHICS_VER(i915) != 11)
return -ENODEV;
if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 45d60e3d98e3..548ddf39d853 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -4,44 +4,102 @@
*/
#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_user_extensions.h"
+
+static u32 object_max_page_size(struct drm_i915_gem_object *obj)
+{
+ u32 max_page_size = 0;
+ int i;
+
+ for (i = 0; i < obj->mm.n_placements; i++) {
+ struct intel_memory_region *mr = obj->mm.placements[i];
+
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ max_page_size = max_t(u32, max_page_size, mr->min_page_size);
+ }
+
+ GEM_BUG_ON(!max_page_size);
+ return max_page_size;
+}
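A quick illustration of why the maximum across placements is taken (page sizes assumed for the example): an object that may land in either system memory (4 KiB minimum page) or device local memory (64 KiB minimum page) must be sized for the coarser region, or the second placement could not be satisfied:

	/* Illustration only; two hypothetical placements. */
	u32 max_page = max_t(u32, SZ_4K, SZ_64K); /* object_max_page_size() -> 64 KiB */
	u64 size = round_up(SZ_4K, max_page);     /* 4 KiB request -> 64 KiB object   */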
+
+static void object_set_placements(struct drm_i915_gem_object *obj,
+ struct intel_memory_region **placements,
+ unsigned int n_placements)
+{
+ GEM_BUG_ON(!n_placements);
+
+ /*
+ * For the common case of one memory region, skip storing an
+ * allocated array and just point at the region directly.
+ */
+ if (n_placements == 1) {
+ struct intel_memory_region *mr = placements[0];
+ struct drm_i915_private *i915 = mr->i915;
+
+ obj->mm.placements = &i915->mm.regions[mr->id];
+ obj->mm.n_placements = 1;
+ } else {
+ obj->mm.placements = placements;
+ obj->mm.n_placements = n_placements;
+ }
+}
+
+static int i915_gem_publish(struct drm_i915_gem_object *obj,
+ struct drm_file *file,
+ u64 *size_p,
+ u32 *handle_p)
+{
+ u64 size = obj->base.size;
+ int ret;
+
+ ret = drm_gem_handle_create(file, &obj->base, handle_p);
+ /* drop reference from allocate - handle holds it now */
+ i915_gem_object_put(obj);
+ if (ret)
+ return ret;
+
+ *size_p = size;
+ return 0;
+}
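The reference flow in the helper above follows the usual GEM pattern; sketched out:

	/* Reference flow, for illustration (not driver code):       */
	/* create: obj refcount = 1                                  */
	/* drm_gem_handle_create(): handle takes its own ref -> 2    */
	/* i915_gem_object_put(): creation ref dropped -> 1          */
	/* on handle-create failure the put frees the object instead */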
static int
-i915_gem_create(struct drm_file *file,
- struct intel_memory_region *mr,
- u64 *size_p,
- u32 *handle_p)
+i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
{
- struct drm_i915_gem_object *obj;
- u32 handle;
- u64 size;
+ struct intel_memory_region *mr = obj->mm.placements[0];
+ unsigned int flags;
int ret;
- GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
- size = round_up(*size_p, mr->min_page_size);
+ size = round_up(size, object_max_page_size(obj));
if (size == 0)
return -EINVAL;
/* For most of the ABI (e.g. mmap) we think in system pages */
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
- /* Allocate the new object */
- obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (i915_gem_object_size_2big(size))
+ return -E2BIG;
- GEM_BUG_ON(size != obj->base.size);
+ /*
+ * For now resort to CPU based clearing for device local-memory, in the
+ * near future this will use the blitter engine for accelerated, GPU
+ * based clearing.
+ */
+ flags = 0;
+ if (mr->type == INTEL_MEMORY_LOCAL)
+ flags = I915_BO_ALLOC_CPU_CLEAR;
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- /* drop reference from allocate - handle holds it now */
- i915_gem_object_put(obj);
+ ret = mr->ops->init_object(mr, obj, size, flags);
if (ret)
return ret;
- *handle_p = handle;
- *size_p = size;
+ GEM_BUG_ON(size != obj->base.size);
+
+ trace_i915_gem_object_create(obj);
return 0;
}
@@ -50,9 +108,12 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mr;
enum intel_memory_type mem_type;
int cpp = DIV_ROUND_UP(args->bpp, 8);
u32 format;
+ int ret;
switch (cpp) {
case 1:
@@ -85,10 +146,22 @@ i915_gem_dumb_create(struct drm_file *file,
if (HAS_LMEM(to_i915(dev)))
mem_type = INTEL_MEMORY_LOCAL;
- return i915_gem_create(file,
- intel_memory_region_by_type(to_i915(dev),
- mem_type),
- &args->size, &args->handle);
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return -ENOMEM;
+
+ mr = intel_memory_region_by_type(to_i915(dev), mem_type);
+ object_set_placements(obj, &mr, 1);
+
+ ret = i915_gem_setup(obj, args->size);
+ if (ret)
+ goto object_free;
+
+ return i915_gem_publish(obj, file, &args->size, &args->handle);
+
+object_free:
+ i915_gem_object_free(obj);
+ return ret;
}
/**
@@ -103,11 +176,229 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_create *args = data;
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mr;
+ int ret;
i915_gem_flush_free_objects(i915);
- return i915_gem_create(file,
- intel_memory_region_by_type(i915,
- INTEL_MEMORY_SYSTEM),
- &args->size, &args->handle);
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return -ENOMEM;
+
+ mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
+ object_set_placements(obj, &mr, 1);
+
+ ret = i915_gem_setup(obj, args->size);
+ if (ret)
+ goto object_free;
+
+ return i915_gem_publish(obj, file, &args->size, &args->handle);
+
+object_free:
+ i915_gem_object_free(obj);
+ return ret;
+}
+
+struct create_ext {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *vanilla_object;
+};
+
+static void repr_placements(char *buf, size_t size,
+ struct intel_memory_region **placements,
+ int n_placements)
+{
+ int i;
+
+ buf[0] = '\0';
+
+ for (i = 0; i < n_placements; i++) {
+ struct intel_memory_region *mr = placements[i];
+ int r;
+
+ r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
+ mr->name, mr->type, mr->instance);
+ if (r >= size)
+ return;
+
+ buf += r;
+ size -= r;
+ }
+}
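The r >= size check relies on snprintf() returning the length that would have been written; once the buffer is exhausted, the loop stops advancing rather than overflowing. A minimal self-contained sketch of the same append idiom, with made-up items:

	char buf[64], *p = buf;
	size_t left = sizeof(buf);
	int i, r;

	buf[0] = '\0';
	for (i = 0; i < 4; i++) {
		r = snprintf(p, left, " item%d", i); /* would-be length */
		if (r < 0 || (size_t)r >= left)
			break;                       /* truncated: stop */
		p += r;
		left -= r;
	}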
+
+static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
+ struct create_ext *ext_data)
+{
+ struct drm_i915_private *i915 = ext_data->i915;
+ struct drm_i915_gem_memory_class_instance __user *uregions =
+ u64_to_user_ptr(args->regions);
+ struct drm_i915_gem_object *obj = ext_data->vanilla_object;
+ struct intel_memory_region **placements;
+ u32 mask;
+ int i, ret = 0;
+
+ if (args->pad) {
+ drm_dbg(&i915->drm, "pad should be zero\n");
+ ret = -EINVAL;
+ }
+
+ if (!args->num_regions) {
+ drm_dbg(&i915->drm, "num_regions is zero\n");
+ ret = -EINVAL;
+ }
+
+ if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
+ drm_dbg(&i915->drm, "num_regions is too large\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ placements = kmalloc_array(args->num_regions,
+ sizeof(struct intel_memory_region *),
+ GFP_KERNEL);
+ if (!placements)
+ return -ENOMEM;
+
+ mask = 0;
+ for (i = 0; i < args->num_regions; i++) {
+ struct drm_i915_gem_memory_class_instance region;
+ struct intel_memory_region *mr;
+
+ if (copy_from_user(&region, uregions, sizeof(region))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ mr = intel_memory_region_lookup(i915,
+ region.memory_class,
+ region.memory_instance);
+ if (!mr || mr->private) {
+ drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
+ region.memory_class, region.memory_instance, i);
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ if (mask & BIT(mr->id)) {
+ drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
+ mr->name, region.memory_class,
+ region.memory_instance, i);
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ placements[i] = mr;
+ mask |= BIT(mr->id);
+
+ ++uregions;
+ }
+
+ if (obj->mm.placements) {
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ object_set_placements(obj, placements, args->num_regions);
+ if (args->num_regions == 1)
+ kfree(placements);
+
+ return 0;
+
+out_dump:
+ if (1) {
+ char buf[256];
+
+ if (obj->mm.placements) {
+ repr_placements(buf,
+ sizeof(buf),
+ obj->mm.placements,
+ obj->mm.n_placements);
+ drm_dbg(&i915->drm,
+ "Placements were already set in previous EXT. Existing placements: %s\n",
+ buf);
+ }
+
+ repr_placements(buf, sizeof(buf), placements, i);
+ drm_dbg(&i915->drm, "New placements (so far validated): %s\n", buf);
+ }
+
+out_free:
+ kfree(placements);
+ return ret;
+}
+
+static int ext_set_placements(struct i915_user_extension __user *base,
+ void *data)
+{
+ struct drm_i915_gem_create_ext_memory_regions ext;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
+ return -ENODEV;
+
+ if (copy_from_user(&ext, base, sizeof(ext)))
+ return -EFAULT;
+
+ return set_placements(&ext, data);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+ [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
+};
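For reference, i915_user_extensions() (not part of this diff) walks a userspace-chained list of struct i915_user_extension and dispatches on the name field through a table like the one above. A simplified sketch of that walk, omitting the loop bound and flags/pad checks the real helper performs:

	u64 next = args->extensions;
	int err;

	while (next) {
		struct i915_user_extension ext;

		if (copy_from_user(&ext, u64_to_user_ptr(next), sizeof(ext)))
			return -EFAULT;

		if (ext.name >= ARRAY_SIZE(create_extensions) ||
		    !create_extensions[ext.name])
			return -EINVAL;

		err = create_extensions[ext.name](u64_to_user_ptr(next), data);
		if (err)
			return err;

		next = ext.next_extension;
	}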
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_create_ext *args = data;
+ struct create_ext ext_data = { .i915 = i915 };
+ struct intel_memory_region **placements_ext;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (args->flags)
+ return -EINVAL;
+
+ i915_gem_flush_free_objects(i915);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return -ENOMEM;
+
+ ext_data.vanilla_object = obj;
+ ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ create_extensions,
+ ARRAY_SIZE(create_extensions),
+ &ext_data);
+ placements_ext = obj->mm.placements;
+ if (ret)
+ goto object_free;
+
+ if (!placements_ext) {
+ struct intel_memory_region *mr =
+ intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
+
+ object_set_placements(obj, &mr, 1);
+ }
+
+ ret = i915_gem_setup(obj, args->size);
+ if (ret)
+ goto object_free;
+
+ return i915_gem_publish(obj, file, &args->size, &args->handle);
+
+object_free:
+ if (obj->mm.n_placements > 1)
+ kfree(placements_ext);
+ i915_gem_object_free(obj);
+ return ret;
}
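From userspace the new ioctl is driven roughly as in the sketch below (uapi names from the accompanying header changes; error handling trimmed, and note that ext_set_placements() above still gates the extension behind CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM at this point):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static uint32_t create_lmem_bo(int fd, uint64_t size)
	{
		struct drm_i915_gem_memory_class_instance regions[] = {
			{ I915_MEMORY_CLASS_DEVICE, 0 }, /* preferred */
			{ I915_MEMORY_CLASS_SYSTEM, 0 }, /* fallback  */
		};
		struct drm_i915_gem_create_ext_memory_regions mem_ext = {
			.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
			.num_regions = 2,
			.regions = (uintptr_t)regions,
		};
		struct drm_i915_gem_create_ext create = {
			.size = size,
			.extensions = (uintptr_t)&mem_ext,
		};

		ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
		return create.handle;
	}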
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index ccede73c6465..616c3a2f1baf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -209,7 +209,7 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
return PTR_ERR(pages);
- sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+ sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5964e67c7d36..a8abc9af5ff4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -274,7 +274,7 @@ struct i915_execbuffer {
struct drm_mm_node node; /** temporary GTT binding */
unsigned long vaddr; /** Current kmap address */
unsigned long page; /** Currently mapped page index */
- unsigned int gen; /** Cached value of INTEL_GEN */
+ unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */
bool use_64bit_reloc : 1;
bool has_llc : 1;
bool has_fence : 1;
@@ -500,7 +500,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
* also covers all platforms with local memory.
*/
if (entry->relocation_count &&
- INTEL_GEN(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
+ GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
return -EINVAL;
if (unlikely(entry->flags & eb->invalid_flags))
@@ -1049,10 +1049,10 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->page = -1;
cache->vaddr = 0;
/* Must be a variable in the struct to allow GCC to unroll. */
- cache->gen = INTEL_GEN(i915);
+ cache->graphics_ver = GRAPHICS_VER(i915);
cache->has_llc = HAS_LLC(i915);
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
- cache->has_fence = cache->gen < 4;
+ cache->has_fence = cache->graphics_ver < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0;
reloc_cache_clear(cache);
@@ -1402,7 +1402,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
err = eb->engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
- cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
+ cache->graphics_ver > 5 ? 0 : I915_DISPATCH_SECURE);
if (err)
goto skip_request;
@@ -1439,7 +1439,7 @@ err_pool:
static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
{
- return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
+ return engine->class != VIDEO_DECODE_CLASS || GRAPHICS_VER(engine->i915) != 6;
}
static u32 *reloc_gpu(struct i915_execbuffer *eb,
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
if (DBG_FORCE_RELOC)
return false;
- return !dma_resv_test_signaled_rcu(vma->resv, true);
+ return !dma_resv_test_signaled(vma->resv, true);
}
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
@@ -1503,14 +1503,14 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
u64 offset,
u64 target_addr)
{
- const unsigned int gen = eb->reloc_cache.gen;
+ const unsigned int ver = eb->reloc_cache.graphics_ver;
unsigned int len;
u32 *batch;
u64 addr;
- if (gen >= 8)
+ if (ver >= 8)
len = offset & 7 ? 8 : 5;
- else if (gen >= 4)
+ else if (ver >= 4)
len = 4;
else
len = 3;
@@ -1522,7 +1522,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
return false;
addr = gen8_canonical_addr(vma->node.start + offset);
- if (gen >= 8) {
+ if (ver >= 8) {
if (offset & 7) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(addr);
@@ -1542,7 +1542,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = lower_32_bits(target_addr);
*batch++ = upper_32_bits(target_addr);
}
- } else if (gen >= 6) {
+ } else if (ver >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = addr;
@@ -1552,12 +1552,12 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = 0;
*batch++ = vma_phys_addr(vma, offset);
*batch++ = target_addr;
- } else if (gen >= 4) {
+ } else if (ver >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = addr;
*batch++ = target_addr;
- } else if (gen >= 3 &&
+ } else if (ver >= 3 &&
!(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
@@ -1671,7 +1671,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* batchbuffers.
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- IS_GEN(eb->i915, 6)) {
+ GRAPHICS_VER(eb->i915) == 6) {
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
@@ -2332,7 +2332,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
+ if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -3375,7 +3375,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
- if (INTEL_GEN(i915) >= 11)
+ if (GRAPHICS_VER(i915) >= 11)
return -ENODEV;
/* Return -EPERM to trigger fallback code on old binaries. */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 7fd22f3efbef..28d6526e32ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -14,6 +14,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index ce1c83c13d05..3b4aa28a076d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -4,22 +4,95 @@
*/
#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+static void lmem_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+ obj->mm.dirty = false;
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static int lmem_get_pages(struct drm_i915_gem_object *obj)
+{
+ unsigned int flags;
+ struct sg_table *pages;
+
+ flags = I915_ALLOC_MIN_PAGE_SIZE;
+ if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+ flags |= I915_ALLOC_CONTIGUOUS;
+
+ obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
+ obj->base.size,
+ flags);
+ if (IS_ERR(obj->mm.st_mm_node))
+ return PTR_ERR(obj->mm.st_mm_node);
+
+ /* Range manager is always contiguous */
+ if (obj->mm.region->is_range_manager)
+ obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
+ pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
+ if (IS_ERR(pages)) {
+ intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+ return PTR_ERR(pages);
+ }
+
+ __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
+
+ if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
+ void __iomem *vaddr =
+ i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+
+ if (!vaddr) {
+ struct sg_table *pages =
+ __i915_gem_object_unset_pages(obj);
+
+ if (!IS_ERR_OR_NULL(pages))
+ lmem_put_pages(obj, pages);
+
+ return -ENOMEM;
+ }
+
+ memset_io(vaddr, 0, obj->base.size);
+ io_mapping_unmap(vaddr);
+ }
+
+ return 0;
+}
+
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
- .get_pages = i915_gem_object_get_pages_buddy,
- .put_pages = i915_gem_object_put_pages_buddy,
+ .get_pages = lmem_get_pages,
+ .put_pages = lmem_put_pages,
.release = i915_gem_object_release_memory_region,
};
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
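The offset arithmetic above translates a device DMA address back into an offset within the region's io_mapping; with made-up addresses:

	/* Illustration only, assumed addresses:                        */
	resource_size_t dma   = 0x180000000ull; /* get_dma_address(obj, n)   */
	resource_size_t start = 0x100000000ull; /* region->region.start      */
	resource_size_t off   = dma - start;    /* 0x80000000 into ->iomap   */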
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
- return obj->ops == &i915_gem_lmem_obj_ops;
+ struct intel_memory_region *mr = obj->mm.region;
+
+ return mr && (mr->type == INTEL_MEMORY_LOCAL ||
+ mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 036d53c01de9..fac6bc5a5ebb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,6 +14,11 @@ struct intel_memory_region;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size);
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index f6fe5cb01438..215326764606 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -56,10 +56,18 @@ int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_mmap *args = data;
struct drm_i915_gem_object *obj;
unsigned long addr;
+ /*
+ * mmap ioctl is disallowed for all discrete platforms,
+ * and for all platforms with GRAPHICS_VER > 12.
+ */
+ if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
+ return -EOPNOTSUPP;
+
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
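On parts where the legacy ioctl now returns -EOPNOTSUPP, userspace is expected to go through the mmap-offset path instead. A hedged sketch using the existing uapi (fd/handle/size assumed in scope, with the usual <sys/mman.h> and <drm/i915_drm.h> includes; error handling trimmed):

	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WC,
	};
	void *ptr = MAP_FAILED;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, arg.offset);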
@@ -367,10 +375,11 @@ retry:
goto err_unpin;
/* Finally, remap it using the new GTT offset */
- ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
- (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start));
+ ret = remap_io_mapping(area,
+ area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start),
+ &ggtt->iomap);
if (ret)
goto err_fence;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ea74cbca95be..5706d471692d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -62,6 +62,13 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops,
struct lock_class_key *key, unsigned flags)
{
+ /*
+ * A gem object is embedded both in a struct ttm_buffer_object :/ and
+ * in a drm_i915_gem_object. Make sure they are aliased.
+ */
+ BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
+ offsetof(typeof(*obj), __do_not_access.base));
+
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
@@ -249,6 +256,12 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (obj->ops->release)
obj->ops->release(obj);
+ if (obj->mm.n_placements > 1)
+ kfree(obj->mm.placements);
+
+ if (obj->shares_resv_from)
+ i915_vm_resv_put(obj->shares_resv_from);
+
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
cond_resched();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2ebd79537aea..7c0eb425cb3b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
struct dma_fence *fence;
rcu_read_lock();
- fence = dma_resv_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index df8e8c18c6c9..3e28c68fda3e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -72,7 +72,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (INTEL_GEN(i915) >= 8) {
+ if (GRAPHICS_VER(i915) >= 8) {
*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cmd++ = 0;
@@ -232,7 +232,7 @@ static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
{
u32 height = size >> PAGE_SHIFT;
- if (!IS_GEN(i915, 11))
+ if (GRAPHICS_VER(i915) != 11)
return false;
return height % 4 == 3 && height <= 8;
@@ -297,7 +297,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
size = min_t(u64, rem, block_size);
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (INTEL_GEN(i915) >= 9 &&
+ if (GRAPHICS_VER(i915) >= 9 &&
!wa_1209644611_applies(i915, size)) {
*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
@@ -309,7 +309,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
*cmd++ = PAGE_SIZE;
*cmd++ = lower_32_bits(src_offset);
*cmd++ = upper_32_bits(src_offset);
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
*cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
*cmd++ = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 8e485cb3343c..d047ea126029 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -10,6 +10,7 @@
#include <linux/mmu_notifier.h>
#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo_api.h>
#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
@@ -99,7 +100,16 @@ struct i915_gem_object_page_iter {
};
struct drm_i915_gem_object {
- struct drm_gem_object base;
+ /*
+ * We might have reason to revisit the below since it wastes
+ * a lot of space for non-ttm gem objects.
+ * In any case, always use the accessors for the ttm_buffer_object
+ * when accessing it.
+ */
+ union {
+ struct drm_gem_object base;
+ struct ttm_buffer_object __do_not_access;
+ };
const struct drm_i915_gem_object_ops *ops;
@@ -149,6 +159,10 @@ struct drm_i915_gem_object {
* when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
*/
struct list_head obj_link;
+ /**
+ * @shares_resv_from: The object shares the resv from this vm.
+ */
+ struct i915_address_space *shares_resv_from;
union {
struct rcu_head rcu;
@@ -172,11 +186,13 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
+#define I915_BO_ALLOC_CPU_CLEAR BIT(3)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
- I915_BO_ALLOC_STRUCT_PAGE)
-#define I915_BO_READONLY BIT(3)
-#define I915_TILING_QUIRK_BIT 4 /* unknown swizzling; do not release! */
+ I915_BO_ALLOC_STRUCT_PAGE | \
+ I915_BO_ALLOC_CPU_CLEAR)
+#define I915_BO_READONLY BIT(4)
+#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -220,13 +236,21 @@ struct drm_i915_gem_object {
atomic_t shrink_pin;
/**
+ * Priority list of potential placements for this object.
+ */
+ struct intel_memory_region **placements;
+ int n_placements;
+
+ /**
* Memory region for this object.
*/
struct intel_memory_region *region;
+
/**
- * List of memory region blocks allocated for this object.
+ * Memory manager node allocated for this object.
*/
- struct list_head blocks;
+ void *st_mm_node;
+
/**
* Element within memory_region->objects or region->purgeable
* if the object is marked as DONTNEED. Access is protected by
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index aed8a37ccdc9..6444e097016d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
+ atomic_inc(&obj->mm.shrink_pin);
shrinkable = false;
}
@@ -473,7 +475,8 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ if (!i915_gem_object_has_pinned_pages(obj))
+ assert_object_held(obj);
/* As we iterate forward through the sg, we record each entry in a
* radixtree for quick repeated (backwards) lookups. If we have seen
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 81dc2bf59bc3..be72ad0634ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -8,7 +8,6 @@
#include <linux/shmem_fs.h>
#include <linux/swap.h>
-#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include "gt/intel_gt.h"
@@ -208,7 +207,7 @@ static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
err_xfer:
if (!IS_ERR_OR_NULL(pages)) {
- unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+ unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 6a84fb6dde24..f25e6646c5b7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -8,107 +8,9 @@
#include "i915_drv.h"
#include "i915_trace.h"
-void
-i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
-
- obj->mm.dirty = false;
- sg_free_table(pages);
- kfree(pages);
-}
-
-int
-i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
-{
- const u64 max_segment = i915_sg_segment_size();
- struct intel_memory_region *mem = obj->mm.region;
- struct list_head *blocks = &obj->mm.blocks;
- resource_size_t size = obj->base.size;
- resource_size_t prev_end;
- struct i915_buddy_block *block;
- unsigned int flags;
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int sg_page_sizes;
- int ret;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- return -ENOMEM;
-
- if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
- kfree(st);
- return -ENOMEM;
- }
-
- flags = I915_ALLOC_MIN_PAGE_SIZE;
- if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
- flags |= I915_ALLOC_CONTIGUOUS;
-
- ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
- if (ret)
- goto err_free_sg;
-
- GEM_BUG_ON(list_empty(blocks));
-
- sg = st->sgl;
- st->nents = 0;
- sg_page_sizes = 0;
- prev_end = (resource_size_t)-1;
-
- list_for_each_entry(block, blocks, link) {
- u64 block_size, offset;
-
- block_size = min_t(u64, size,
- i915_buddy_block_size(&mem->mm, block));
- offset = i915_buddy_block_offset(block);
-
- while (block_size) {
- u64 len;
-
- if (offset != prev_end || sg->length >= max_segment) {
- if (st->nents) {
- sg_page_sizes |= sg->length;
- sg = __sg_next(sg);
- }
-
- sg_dma_address(sg) = mem->region.start + offset;
- sg_dma_len(sg) = 0;
- sg->length = 0;
- st->nents++;
- }
-
- len = min(block_size, max_segment - sg->length);
- sg->length += len;
- sg_dma_len(sg) += len;
-
- offset += len;
- block_size -= len;
-
- prev_end = offset;
- }
- }
-
- sg_page_sizes |= sg->length;
- sg_mark_end(sg);
- i915_sg_trim(st);
-
- __i915_gem_object_set_pages(obj, st, sg_page_sizes);
-
- return 0;
-
-err_free_sg:
- sg_free_table(st);
- kfree(st);
- return ret;
-}
-
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem)
{
- INIT_LIST_HEAD(&obj->mm.blocks);
obj->mm.region = intel_memory_region_get(mem);
if (obj->base.size <= mem->min_page_size)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..84fcb3297400 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -12,10 +12,6 @@ struct intel_memory_region;
struct drm_i915_gem_object;
struct sg_table;
-int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
-void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
-
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem);
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index a9bfa66c8da1..5d16c4462fda 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -628,11 +628,13 @@ static const struct intel_memory_region_ops shmem_region_ops = {
.init_object = shmem_object_init,
};
-struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
+ u16 type, u16 instance)
{
return intel_memory_region_create(i915, 0,
totalram_pages() << PAGE_SHIFT,
PAGE_SIZE, 0,
+ type, instance,
&shmem_region_ops);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 4f9c8d3021ab..f4fb68e8955a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -38,15 +38,17 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
}
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
- unsigned long shrink)
+ unsigned long shrink, bool trylock_vm)
{
unsigned long flags;
flags = 0;
if (shrink & I915_SHRINK_ACTIVE)
- flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+ flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
if (!(shrink & I915_SHRINK_BOUND))
- flags = I915_GEM_OBJECT_UNBIND_TEST;
+ flags |= I915_GEM_OBJECT_UNBIND_TEST;
+ if (trylock_vm)
+ flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;
if (i915_gem_object_unbind(obj, flags) == 0)
return true;
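The switch from = to |= in the hunk above is a behavioral fix, not churn: with plain assignment the second condition silently discarded the first flag:

	/* Old behavior, illustrated:                                    */
	flags = I915_GEM_OBJECT_UNBIND_ACTIVE; /* I915_SHRINK_ACTIVE set */
	flags = I915_GEM_OBJECT_UNBIND_TEST;   /* ACTIVE silently lost   */
	/* Accumulating with |= keeps both bits.                         */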
@@ -117,6 +119,9 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
unsigned long scanned = 0;
int err;
+ /* CHV + VTD workaround uses stop_machine(); need to trylock vm->mutex */
+ bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
+
trace_i915_gem_shrink(i915, target, shrink);
/*
@@ -204,7 +209,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
err = 0;
- if (unsafe_drop_pages(obj, shrink)) {
+ if (unsafe_drop_pages(obj, shrink, trylock_vm)) {
/* May arrive from get_pages on another bo */
if (!ww) {
if (!i915_gem_object_trylock(obj))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index b0597de206de..b0c3a7dc60d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
@@ -37,7 +38,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
return -ENODEV;
/* WaSkipStolenMemoryFirstPage:bdw+ */
- if (INTEL_GEN(i915) >= 8 && start < 4096)
+ if (GRAPHICS_VER(i915) >= 8 && start < 4096)
start = 4096;
mutex_lock(&i915->mm.stolen_lock);
@@ -83,14 +84,14 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
*/
/* Make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_GEN(i915) <= 4 &&
+ if (GRAPHICS_VER(i915) <= 4 &&
!IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
struct resource stolen[2] = {*dsm, *dsm};
struct resource ggtt_res;
resource_size_t ggtt_start;
ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
- if (IS_GEN(i915, 4))
+ if (GRAPHICS_VER(i915) == 4)
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -122,6 +123,14 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
}
/*
+ * With stolen lmem, we don't need to check if the address range
+ * overlaps with the non-stolen system memory range, since lmem is local
+ * to the gpu.
+ */
+ if (HAS_LMEM(i915))
+ return 0;
+
+ /*
* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
@@ -147,7 +156,7 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (!r && !IS_GEN(i915, 3)) {
+ if (!r && GRAPHICS_VER(i915) != 3) {
drm_err(&i915->drm,
"conflict detected with stolen region: %pR\n",
dsm);
@@ -188,7 +197,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- drm_WARN(&i915->drm, IS_GEN(i915, 5),
+ drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
"ILK stolen reserved found? 0x%08x\n",
reg_val);
@@ -374,8 +383,9 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-static int i915_gem_init_stolen(struct drm_i915_private *i915)
+static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
+ struct drm_i915_private *i915 = mem->i915;
struct intel_uncore *uncore = &i915->uncore;
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -389,17 +399,17 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
return 0;
}
- if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
+ if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) {
drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"DMAR active");
return 0;
}
- if (resource_size(&intel_graphics_stolen_res) == 0)
+ if (resource_size(&mem->region) == 0)
return 0;
- i915->dsm = intel_graphics_stolen_res;
+ i915->dsm = mem->region;
if (i915_adjust_stolen(i915, &i915->dsm))
return 0;
@@ -411,7 +421,7 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
reserved_base = stolen_top;
reserved_size = 0;
- switch (INTEL_GEN(i915)) {
+ switch (GRAPHICS_VER(i915)) {
case 2:
case 3:
break;
@@ -446,7 +456,7 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
&reserved_base, &reserved_size);
break;
default:
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(GRAPHICS_VER(i915));
fallthrough;
case 11:
case 12:
@@ -627,10 +637,17 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
{
static struct lock_class_key lock_class;
unsigned int cache_level;
+ unsigned int flags;
int err;
+ /*
+ * Stolen objects are always physically contiguous since we just
+ * allocate one big block underneath using the drm_mm range allocator.
+ */
+ flags = I915_BO_ALLOC_CONTIGUOUS;
+
drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
- i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
@@ -640,9 +657,11 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
if (WARN_ON(!i915_gem_object_trylock(obj)))
return -EBUSY;
+ i915_gem_object_init_memory_region(obj, mem);
+
err = i915_gem_object_pin_pages(obj);
- if (!err)
- i915_gem_object_init_memory_region(obj, mem);
+ if (err)
+ i915_gem_object_release_memory_region(obj);
i915_gem_object_unlock(obj);
return err;
@@ -667,7 +686,8 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
if (!stolen)
return -ENOMEM;
- ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
+ ret = i915_gem_stolen_insert_node(i915, stolen, size,
+ mem->min_page_size);
if (ret)
goto err_free;
@@ -688,39 +708,128 @@ struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size)
{
- return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN_SMEM],
- size, I915_BO_ALLOC_CONTIGUOUS);
+ return i915_gem_object_create_region(i915->mm.stolen_region, size, 0);
}
-static int init_stolen(struct intel_memory_region *mem)
+static int init_stolen_smem(struct intel_memory_region *mem)
{
- intel_memory_region_set_name(mem, "stolen");
-
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
- return i915_gem_init_stolen(mem->i915);
+ return i915_gem_init_stolen(mem);
+}
+
+static void release_stolen_smem(struct intel_memory_region *mem)
+{
+ i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
+ .init = init_stolen_smem,
+ .release = release_stolen_smem,
+ .init_object = _i915_gem_object_stolen_init,
+};
+
+static int init_stolen_lmem(struct intel_memory_region *mem)
+{
+ int err;
+
+ if (GEM_WARN_ON(resource_size(&mem->region) == 0))
+ return -ENODEV;
+
+ if (!io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ resource_size(&mem->region)))
+ return -EIO;
+
+ /*
+ * TODO: For stolen lmem we mostly just care about populating the dsm
+ * related bits and setting up the drm_mm allocator for the range.
+ * Perhaps split up i915_gem_init_stolen() for this.
+ */
+ err = i915_gem_init_stolen(mem);
+ if (err)
+ goto err_fini;
+
+ return 0;
+
+err_fini:
+ io_mapping_fini(&mem->iomap);
+ return err;
}
-static void release_stolen(struct intel_memory_region *mem)
+static void release_stolen_lmem(struct intel_memory_region *mem)
{
+ io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915);
}
-static const struct intel_memory_region_ops i915_region_stolen_ops = {
- .init = init_stolen,
- .release = release_stolen,
+static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
+ .init = init_stolen_lmem,
+ .release = release_stolen_lmem,
.init_object = _i915_gem_object_stolen_init,
};
-struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
+struct intel_memory_region *
+i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
+ u16 instance)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_memory_region *mem;
+ resource_size_t io_start;
+ resource_size_t lmem_size;
+ u64 lmem_base;
+
+ lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
+ if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2)))
+ return ERR_PTR(-ENODEV);
+
+ lmem_size = pci_resource_len(pdev, 2) - lmem_base;
+ io_start = pci_resource_start(pdev, 2) + lmem_base;
+
+ mem = intel_memory_region_create(i915, lmem_base, lmem_size,
+ I915_GTT_PAGE_SIZE_4K, io_start,
+ type, instance,
+ &i915_region_stolen_lmem_ops);
+ if (IS_ERR(mem))
+ return mem;
+
+ /*
+ * TODO: consider creating common helper to just print all the
+ * interesting stuff from intel_memory_region, which we can use for all
+ * our probed regions.
+ */
+
+ drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
+ &mem->io_start);
+
+ intel_memory_region_set_name(mem, "stolen-local");
+
+ mem->private = true;
+
+ return mem;
+}
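The BAR math above treats GEN12_DSMBASE as the start of the stolen portion of the LMEM BAR; with illustrative numbers:

	/* Illustration only: 8 GiB LMEM BAR, DSMBASE reads 7.75 GiB. */
	/* lmem_base = 0x1F0000000                                    */
	/* lmem_size = 8 GiB - 7.75 GiB = 256 MiB of stolen-local     */
	/* io_start  = pci_resource_start(pdev, 2) + 0x1F0000000      */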
+
+struct intel_memory_region *
+i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
+ u16 instance)
{
- return intel_memory_region_create(i915,
- intel_graphics_stolen_res.start,
- resource_size(&intel_graphics_stolen_res),
- PAGE_SIZE, 0,
- &i915_region_stolen_ops);
+ struct intel_memory_region *mem;
+
+ mem = intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+ resource_size(&intel_graphics_stolen_res),
+ PAGE_SIZE, 0, type, instance,
+ &i915_region_stolen_smem_ops);
+ if (IS_ERR(mem))
+ return mem;
+
+ intel_memory_region_set_name(mem, "stolen-system");
+
+ mem->private = true;
+ return mem;
}
struct drm_i915_gem_object *
@@ -728,7 +837,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
resource_size_t size)
{
- struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN_SMEM];
+ struct intel_memory_region *mem = i915->mm.stolen_region;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -742,8 +851,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
/* KISS and expect everything to be page-aligned */
if (GEM_WARN_ON(size == 0) ||
- GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
- GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
+ GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
+ GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size)))
return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index b03489706796..ccdf7befc571 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -21,7 +21,13 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915);
+struct intel_memory_region *
+i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
+ u16 instance);
+struct intel_memory_region *
+i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
+ u16 instance);
+
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 9e8945013090..ef4d0f7dc118 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -62,14 +62,14 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
GEM_BUG_ON(!stride);
- if (INTEL_GEN(i915) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
stride *= i915_gem_tile_height(tiling);
GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
return roundup(size, stride);
}
/* Previous chips need a power-of-two fence region when tiling */
- if (IS_GEN(i915, 3))
+ if (GRAPHICS_VER(i915) == 3)
ggtt_size = 1024*1024;
else
ggtt_size = 512*1024;
@@ -102,7 +102,7 @@ u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
if (tiling == I915_TILING_NONE)
return I915_GTT_MIN_ALIGNMENT;
- if (INTEL_GEN(i915) >= 4)
+ if (GRAPHICS_VER(i915) >= 4)
return I965_FENCE_PAGE;
/*
@@ -130,10 +130,10 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
- if (INTEL_GEN(i915) >= 7) {
+ if (GRAPHICS_VER(i915) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (GRAPHICS_VER(i915) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
@@ -144,7 +144,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
return false;
}
- if (IS_GEN(i915, 2) ||
+ if (GRAPHICS_VER(i915) == 2 ||
(tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
tile_width = 128;
else
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a657b99ec760..7487bab11f0b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
return true;
/* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(obj->base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
@@ -173,7 +173,7 @@ alloc_table:
goto err;
}
- sg_page_sizes = i915_sg_page_sizes(st->sgl);
+ sg_page_sizes = i915_sg_dma_sizes(st->sgl);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 4b9856d5ba14..1e97520c62b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
*/
prune_fences = count && timeout >= 0;
} else {
- excl = dma_resv_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_unlocked(resv);
}
if (excl && timeout >= 0)
@@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+ &shared);
if (ret)
return ret;
@@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_unlocked(obj->base.resv);
}
if (excl) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index d36873885cc1..176e6b22f87f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -152,8 +152,8 @@ static int prepare_blit(const struct tiled_blits *t,
struct blit_buffer *src,
struct drm_i915_gem_object *batch)
{
- const int gen = INTEL_GEN(to_i915(batch->base.dev));
- bool use_64b_reloc = gen >= 8;
+ const int ver = GRAPHICS_VER(to_i915(batch->base.dev));
+ bool use_64b_reloc = ver >= 8;
u32 src_pitch, dst_pitch;
u32 cmd, *cs;
@@ -171,7 +171,7 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = cmd;
cmd = MI_FLUSH_DW;
- if (gen >= 8)
+ if (ver >= 8)
cmd++;
*cs++ = cmd;
*cs++ = 0;
@@ -179,7 +179,7 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = 0;
cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
- if (gen >= 8)
+ if (ver >= 8)
cmd += 2;
src_pitch = t->width * 4;
@@ -666,7 +666,7 @@ static int igt_client_tiled_blits(void *arg)
int inst = 0;
/* Test requires explicit BLT tiling controls */
- if (INTEL_GEN(i915) < 4)
+ if (GRAPHICS_VER(i915) < 4)
return 0;
if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index e937b6629019..13b088cc787e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -221,12 +221,12 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
goto out_rq;
}
- if (INTEL_GEN(ctx->engine->i915) >= 8) {
+ if (GRAPHICS_VER(ctx->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
- } else if (INTEL_GEN(ctx->engine->i915) >= 4) {
+ } else if (GRAPHICS_VER(ctx->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 5fef592390cb..dbcfa28a9d91 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -897,7 +897,7 @@ static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *v
{
u32 *cmd;
- GEM_BUG_ON(INTEL_GEN(vma->vm->i915) < 8);
+ GEM_BUG_ON(GRAPHICS_VER(vma->vm->i915) < 8);
cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB);
if (IS_ERR(cmd))
@@ -932,7 +932,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- if (INTEL_GEN(i915) < 8)
+ if (GRAPHICS_VER(i915) < 8)
return -EINVAL;
vma = i915_vma_instance(obj, ce->vm, NULL);
@@ -1100,7 +1100,7 @@ __read_slice_count(struct intel_context *ce,
return ret;
}
- if (INTEL_GEN(ce->engine->i915) >= 11) {
+ if (GRAPHICS_VER(ce->engine->i915) >= 11) {
s_mask = GEN11_RPCS_S_CNT_MASK;
s_shift = GEN11_RPCS_S_CNT_SHIFT;
} else {
@@ -1229,7 +1229,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
int inst = 0;
int ret = 0;
- if (INTEL_GEN(i915) < 9)
+ if (GRAPHICS_VER(i915) < 9)
return 0;
if (flags & TEST_RESET)
@@ -1518,7 +1518,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
}
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
- if (INTEL_GEN(i915) >= 8) {
+ if (GRAPHICS_VER(i915) >= 8) {
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
} else {
@@ -1608,7 +1608,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (INTEL_GEN(i915) >= 8) {
+ if (GRAPHICS_VER(i915) >= 8) {
const u32 GPR0 = engine->mmio_base + 0x600;
vm = i915_gem_context_get_vm_rcu(ctx);
@@ -1740,7 +1740,6 @@ out:
static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
{
struct i915_address_space *vm;
- struct page *page;
u32 *vaddr;
int err = 0;
@@ -1748,24 +1747,18 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
if (!vm)
return -ENODEV;
- page = __px_page(vm->scratch[0]);
- if (!page) {
+ if (!vm->scratch[0]) {
pr_err("No scratch page!\n");
return -EINVAL;
}
- vaddr = kmap(page);
- if (!vaddr) {
- pr_err("No (mappable) scratch page!\n");
- return -EINVAL;
- }
+ vaddr = __px_vaddr(vm->scratch[0]);
memcpy(out, vaddr, sizeof(*out));
if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
pr_err("Inconsistent initial state of scratch page!\n");
err = -EINVAL;
}
- kunmap(page);
return err;
}
@@ -1783,7 +1776,7 @@ static int igt_vm_isolation(void *arg)
u32 expected;
int err;
- if (INTEL_GEN(i915) < 7)
+ if (GRAPHICS_VER(i915) < 7)
return 0;
/*
@@ -1837,7 +1830,7 @@ static int igt_vm_isolation(void *arg)
continue;
/* Not all engines have their own GPR! */
- if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
+ if (GRAPHICS_VER(i915) < 8 && engine->class != RENDER_CLASS)
continue;
while (!__igt_timeout(end_time, NULL)) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5cf6df49c333..5575172c66f5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -273,7 +273,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) <= 2) {
+ if (GRAPHICS_VER(i915) <= 2) {
tile->height = 16;
tile->width = 128;
tile->size = 11;
@@ -288,9 +288,9 @@ setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
tile->size = 12;
}
- if (INTEL_GEN(i915) < 4)
+ if (GRAPHICS_VER(i915) < 4)
return 8192 / tile->width;
- else if (INTEL_GEN(i915) < 7)
+ else if (GRAPHICS_VER(i915) < 7)
return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
else
return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
@@ -386,7 +386,7 @@ static int igt_partial_tiling(void *arg)
if (err)
goto out_unlock;
- if (pitch > 2 && INTEL_GEN(i915) >= 4) {
+ if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
tile.stride = tile.width * (pitch - 1);
err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
@@ -395,7 +395,7 @@ static int igt_partial_tiling(void *arg)
goto out_unlock;
}
- if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
+ if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
tile.stride = tile.width * (pitch + 1);
err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
@@ -405,7 +405,7 @@ static int igt_partial_tiling(void *arg)
}
}
- if (INTEL_GEN(i915) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
for_each_prime_number(pitch, max_pitch) {
tile.stride = tile.width * pitch;
err = check_partial_mappings(obj, &tile, end);
@@ -501,7 +501,7 @@ static int igt_smoke_tiling(void *arg)
tile.stride =
i915_prandom_u32_max_state(max_pitch, &prng);
tile.stride = (1 + tile.stride) * tile.width;
- if (INTEL_GEN(i915) < 4)
+ if (GRAPHICS_VER(i915) < 4)
tile.stride = rounddown_pow_of_two(tile.stride);
}
@@ -842,6 +842,24 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
return true;
}
+static void object_set_placements(struct drm_i915_gem_object *obj,
+ struct intel_memory_region **placements,
+ unsigned int n_placements)
+{
+ GEM_BUG_ON(!n_placements);
+
+ if (n_placements == 1) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mr = placements[0];
+
+ obj->mm.placements = &i915->mm.regions[mr->id];
+ obj->mm.n_placements = 1;
+ } else {
+ obj->mm.placements = placements;
+ obj->mm.n_placements = n_placements;
+ }
+}
+
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
@@ -871,7 +889,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
- area = find_vma(current->mm, addr);
+ area = vma_lookup(current->mm, addr);
if (!area) {
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
obj->mm.region->name);
@@ -950,6 +968,8 @@ static int igt_mmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
@@ -1068,6 +1088,8 @@ static int igt_mmap_access(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
@@ -1211,6 +1233,8 @@ static int igt_mmap_gpu(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
@@ -1354,6 +1378,8 @@ static int igt_mmap_revoke(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
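Aside: the find_vma() to vma_lookup() switch above is a semantic tightening, not just a rename. find_vma() returns the first VMA that ends above the address, which may lie entirely above it; vma_lookup() only returns a VMA that actually contains the address, so the caller no longer needs a separate addr < vm_start check. A toy illustration over a sorted interval table (not the real mm API):

#include <stdio.h>

struct toy_vma { unsigned long start, end; };	/* [start, end) */

static const struct toy_vma vmas[] = {
	{ 0x1000, 0x2000 },
	{ 0x8000, 0x9000 },
};

/* find_vma() semantics: first vma with end > addr, possibly not
 * containing addr at all. */
static const struct toy_vma *toy_find_vma(unsigned long addr)
{
	for (unsigned int i = 0; i < 2; i++)
		if (addr < vmas[i].end)
			return &vmas[i];
	return NULL;
}

/* vma_lookup() semantics: a hit only if addr falls inside the vma. */
static const struct toy_vma *toy_vma_lookup(unsigned long addr)
{
	const struct toy_vma *vma = toy_find_vma(addr);

	return (vma && addr >= vma->start) ? vma : NULL;
}

int main(void)
{
	printf("find_vma(0x3000) hits: %d\n", toy_find_vma(0x3000) != NULL);
	printf("vma_lookup(0x3000) hits: %d\n", toy_vma_lookup(0x3000) != NULL);
	return 0;
}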
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 0b092c62bb34..b35c1219c852 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -44,7 +44,7 @@ igt_emit_store_dw(struct i915_vma *vma,
u32 val)
{
struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(vma->vm->i915);
+ const int ver = GRAPHICS_VER(vma->vm->i915);
unsigned long n, size;
u32 *cmd;
int err;
@@ -65,14 +65,14 @@ igt_emit_store_dw(struct i915_vma *vma,
offset += vma->node.start;
for (n = 0; n < count; n++) {
- if (gen >= 8) {
+ if (ver >= 8) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = val;
- } else if (gen >= 4) {
+ } else if (ver >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
+ (ver < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = val;
@@ -146,7 +146,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
goto skip_request;
flags = 0;
- if (INTEL_GEN(ce->vm->i915) <= 5)
+ if (GRAPHICS_VER(ce->vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq,
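Aside: igt_emit_store_dw() shows why the gen-to-ver rename still leaves version ladders behind: the store-dword packet layout differs by version. On ver >= 8 the address is 64-bit (two dwords); on ver 4 through 7 it is a single 32-bit dword preceded by a zero, with MI_USE_GGTT only below ver 6. A compact sketch of that packing into a plain u32 buffer; the opcode values are placeholders and the pre-gen4 leg is my assumption about the code the hunk truncates:

#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_STORE_DWORD_IMM 0x20000000u	/* placeholder opcode */
#define FAKE_MI_USE_GGTT        0x00400000u	/* placeholder flag */

/* Returns the number of dwords emitted. */
static unsigned int emit_store_dw(uint32_t *cs, unsigned int ver,
				  uint64_t offset, uint32_t val)
{
	uint32_t *start = cs;

	if (ver >= 8) {
		*cs++ = FAKE_MI_STORE_DWORD_IMM;
		*cs++ = (uint32_t)offset;		/* lower_32_bits() */
		*cs++ = (uint32_t)(offset >> 32);	/* upper_32_bits() */
		*cs++ = val;
	} else if (ver >= 4) {
		*cs++ = FAKE_MI_STORE_DWORD_IMM |
			(ver < 6 ? FAKE_MI_USE_GGTT : 0);
		*cs++ = 0;
		*cs++ = (uint32_t)offset;
		*cs++ = val;
	} else {
		*cs++ = FAKE_MI_STORE_DWORD_IMM | FAKE_MI_USE_GGTT;
		*cs++ = (uint32_t)offset;
		*cs++ = val;
	}
	return cs - start;
}

int main(void)
{
	uint32_t buf[4];

	printf("ver 12 emits %u dwords\n", emit_store_dw(buf, 12, 0x1000, 1));
	return 0;
}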
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index d4f4452ce5ed..4270b5a34a83 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -85,14 +85,14 @@ static int gen6_drpc(struct seq_file *m)
gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
- if (INTEL_GEN(i915) >= 9) {
+ if (GRAPHICS_VER(i915) >= 9) {
gen9_powergate_enable =
intel_uncore_read(uncore, GEN9_PG_ENABLE);
gen9_powergate_status =
intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
}
- if (INTEL_GEN(i915) <= 7)
+ if (GRAPHICS_VER(i915) <= 7)
sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
&rc6vids, NULL);
@@ -100,7 +100,7 @@ static int gen6_drpc(struct seq_file *m)
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- if (INTEL_GEN(i915) >= 9) {
+ if (GRAPHICS_VER(i915) >= 9) {
seq_printf(m, "Render Well Gating Enabled: %s\n",
yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
seq_printf(m, "Media Well Gating Enabled: %s\n",
@@ -134,7 +134,7 @@ static int gen6_drpc(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
- if (INTEL_GEN(i915) >= 9) {
+ if (GRAPHICS_VER(i915) >= 9) {
seq_printf(m, "Render Power Well: %s\n",
(gen9_powergate_status &
GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
@@ -150,7 +150,7 @@ static int gen6_drpc(struct seq_file *m)
print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
- if (INTEL_GEN(i915) <= 7) {
+ if (GRAPHICS_VER(i915) <= 7) {
seq_printf(m, "RC6 voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
seq_printf(m, "RC6+ voltage: %dmV\n",
@@ -230,7 +230,7 @@ static int drpc_show(struct seq_file *m, void *unused)
with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_drpc(m);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
err = gen6_drpc(m);
else
err = ilk_drpc(m);
@@ -250,7 +250,7 @@ static int frequency_show(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(uncore->rpm);
- if (IS_GEN(i915, 5)) {
+ if (GRAPHICS_VER(i915) == 5) {
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
@@ -296,7 +296,7 @@ static int frequency_show(struct seq_file *m, void *unused)
seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
- } else if (INTEL_GEN(i915) >= 6) {
+ } else if (GRAPHICS_VER(i915) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
u32 rp_state_cap;
@@ -321,7 +321,7 @@ static int frequency_show(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
- if (INTEL_GEN(i915) >= 9) {
+ if (GRAPHICS_VER(i915) >= 9) {
reqf >>= 23;
} else {
reqf &= ~GEN6_TURBO_DISABLE;
@@ -354,7 +354,7 @@ static int frequency_show(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
/*
@@ -363,7 +363,7 @@ static int frequency_show(struct seq_file *m, void *unused)
*/
pm_isr = 0;
pm_iir = 0;
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
@@ -386,14 +386,14 @@ static int frequency_show(struct seq_file *m, void *unused)
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
- if (INTEL_GEN(i915) <= 10)
+ if (GRAPHICS_VER(i915) <= 10)
seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
pm_isr, pm_iir);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+ (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -437,20 +437,20 @@ static int frequency_show(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_GEN9_BC(i915) ||
- INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(i915) ||
- INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(i915) ||
- INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -488,7 +488,7 @@ static int llc_show(struct seq_file *m, void *data)
{
struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
- const bool edram = INTEL_GEN(i915) > 8;
+ const bool edram = GRAPHICS_VER(i915) > 8;
struct intel_rps *rps = &gt->rps;
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
@@ -500,7 +500,7 @@ static int llc_show(struct seq_file *m, void *data)
min_gpu_freq = rps->min_freq;
max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq /= GEN9_FREQ_SCALER;
max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -518,7 +518,7 @@ static int llc_show(struct seq_file *m, void *data)
intel_gpu_freq(rps,
(gpu_freq *
(IS_GEN9_BC(i915) ||
- INTEL_GEN(i915) >= 10 ?
+ GRAPHICS_VER(i915) >= 10 ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
@@ -580,7 +580,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
- if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
+ if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
u32 rpup, rpupei;
u32 rpdown, rpdownei;
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 9646200d2792..61383830505e 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -74,7 +74,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
- if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
+ if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
cmd |= MI_INVALIDATE_ISP;
}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index e08dff376339..1aee5e6b1b23 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -96,9 +96,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
* entries back to scratch.
*/
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset32(vaddr + pte, scratch_pte, count);
- kunmap_atomic(vaddr);
pte = 0;
}
@@ -120,7 +119,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
GEM_BUG_ON(!pd->entry[act_pt]);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+ vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
do {
GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -136,12 +135,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
if (++act_pte == GEN6_PTES) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+ vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
act_pte = 0;
}
} while (1);
- kunmap_atomic(vaddr);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
@@ -235,7 +232,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
goto err_scratch0;
}
- ret = pin_pt_dma(vm, vm->scratch[1]);
+ ret = map_pt_dma(vm, vm->scratch[1]);
if (ret)
goto err_scratch1;
@@ -346,7 +343,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
- i915_active_init(&vma->active, NULL, NULL);
+ i915_active_init(&vma->active, NULL, NULL, 0);
kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
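Aside: the kmap_atomic_px()/kunmap_atomic() pairs throughout the PPGTT code collapse into bare px_vaddr() calls because page-table pages are now kept permanently mapped; pin_pt_dma() becoming map_pt_dma() is the other half of the same change. A sketch of the idea with a toy page-table object (names hypothetical):

#include <stdlib.h>

struct toy_pt {
	void *vaddr;	/* cached mapping, set up once */
};

/* map_pt_dma() analogue: map at setup time and cache the pointer... */
static int toy_map_pt(struct toy_pt *pt)
{
	pt->vaddr = calloc(1, 4096);
	return pt->vaddr ? 0 : -1;
}

/* ...so px_vaddr() is just a lookup, with no kunmap pairing needed. */
static void *toy_px_vaddr(struct toy_pt *pt)
{
	return pt->vaddr;
}

int main(void)
{
	struct toy_pt pt;

	if (toy_map_pt(&pt))
		return 1;
	((unsigned int *)toy_px_vaddr(&pt))[0] = 0xdeadbeef;	/* PTE write */
	free(pt.vaddr);
	return 0;
}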
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index de575fdb033f..21f08e53889c 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
- batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, 0xffff0000 |
+ ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+ HIZ_RAW_STALL_OPT_DISABLE :
+ 0));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
gen7_emit_pipeline_invalidate(&cmds);
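Aside: CACHE_MODE_0/CACHE_MODE_1 are masked registers: the top 16 bits select which of the low 16 bits the write actually updates, which is why the batch above stores 0xffff0000 | bit to program a single bit (and 0xffff0000 alone to clear all of them). A small helper showing the encoding, equivalent in spirit to the kernel's _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(); the HIZ bit value below is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* For a masked register, bit n is only written when bit n+16 is set. */
static uint32_t masked_bit_enable(uint16_t bit)
{
	return ((uint32_t)bit << 16) | bit;
}

static uint32_t masked_bit_disable(uint16_t bit)
{
	return (uint32_t)bit << 16;	/* select the bit, write it as 0 */
}

int main(void)
{
	const uint16_t HIZ_RAW_STALL_OPT_DISABLE = 1 << 2;	/* illustrative */

	printf("enable : %#010x\n", masked_bit_enable(HIZ_RAW_STALL_OPT_DISABLE));
	printf("disable: %#010x\n", masked_bit_disable(HIZ_RAW_STALL_OPT_DISABLE));
	/* 0xffff0000 | bit, as in emit_batch(): write all 16 low bits,
	 * setting only the chosen one. */
	printf("batch  : %#010x\n", 0xffff0000u | HIZ_RAW_STALL_OPT_DISABLE);
	return 0;
}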
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 732c2ed1d933..94e0a5669f90 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -38,7 +38,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN(rq->engine->i915, 9))
+ if (GRAPHICS_VER(rq->engine->i915) == 9)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 74bf6fc8461f..21c8b7350b7a 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -242,11 +242,10 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
atomic_read(&pt->used));
GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset64(vaddr + gen8_pd_index(start, 0),
vm->scratch[0]->encode,
count);
- kunmap_atomic(vaddr);
atomic_sub(count, &pt->used);
start += count;
@@ -375,7 +374,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
gen8_pte_t *vaddr;
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
@@ -402,12 +401,10 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
}
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
return idx;
}
@@ -442,7 +439,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
encode |= GEN8_PDE_PS_2M;
page_size = I915_GTT_PAGE_SIZE_2M;
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
} else {
struct i915_page_table *pt =
i915_pt_entry(pd, __gen8_pte_index(start, 1));
@@ -457,7 +454,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
maybe_64K = __gen8_pte_index(start, 1);
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
}
do {
@@ -491,7 +488,6 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
} while (rem >= page_size && index < I915_PDES);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
/*
* Is it safe to mark the 2M block as 64K? -- Either we have
@@ -505,9 +501,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
!iter->sg && IS_ALIGNED(vma->node.start +
vma->node.size,
I915_GTT_PAGE_SIZE_2M)))) {
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- kunmap_atomic(vaddr);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
@@ -523,12 +518,11 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
u16 i;
encode = vma->vm->scratch[0]->encode;
- vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+ vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
- kunmap_atomic(vaddr);
}
}
@@ -602,7 +596,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (IS_ERR(obj))
goto free_scratch;
- ret = pin_pt_dma(vm, obj);
+ ret = map_pt_dma(vm, obj);
if (ret) {
i915_gem_object_put(obj);
goto free_scratch;
@@ -639,7 +633,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
if (IS_ERR(pde))
return PTR_ERR(pde);
- err = pin_pt_dma(vm, pde->pt.base);
+ err = map_pt_dma(vm, pde->pt.base);
if (err) {
free_pd(vm, pde);
return err;
@@ -674,7 +668,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm)
goto err_pd;
}
- err = pin_pt_dma(vm, pd->pt.base);
+ err = map_pt_dma(vm, pd->pt.base);
if (err)
goto err_pd;
@@ -715,9 +709,12 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
*
* Gen12 has inherited the same read-only fault issue from gen11.
*/
- ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
+ ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
- ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+ if (HAS_LMEM(gt->i915))
+ ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
+ else
+ ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
err = gen8_init_scratch(&ppgtt->vm);
if (err)
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
index 76a08b9c1f5c..b9028c2ad3c7 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -6,8 +6,15 @@
#ifndef __GEN8_PPGTT_H__
#define __GEN8_PPGTT_H__
+#include <linux/kernel.h>
+
+struct i915_address_space;
struct intel_gt;
+enum i915_cache_level;
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
+u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 17cf2640b082..4033184f13b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -326,7 +326,6 @@ void intel_context_unpin(struct intel_context *ce)
intel_context_put(ce);
}
-__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -385,7 +384,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
mutex_init(&ce->pin_mutex);
i915_active_init(&ce->active,
- __intel_context_active, __intel_context_retire);
+ __intel_context_active, __intel_context_retire, 0);
}
void intel_context_fini(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index 8dfd8f656aaa..e86d8255feec 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -76,7 +76,7 @@ intel_context_reconfigure_sseu(struct intel_context *ce,
{
int ret;
- GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+ GEM_BUG_ON(GRAPHICS_VER(ce->engine->i915) < 8);
ret = intel_context_lock_pinned(ce);
if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 47ee8578e511..8d9184920c51 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -13,8 +13,9 @@
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
-#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_timeline.h"
#include "intel_workarounds.h"
struct drm_printer;
@@ -262,6 +263,11 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
#define ENGINE_MOCK 1
#define ENGINE_VIRTUAL 2
+static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine)
+{
+ return engine->gt->submission_method >= INTEL_SUBMISSION_GUC;
+}
+
static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
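Aside: intel_engine_uses_guc() works because the new gt->submission_method enum is ordered so that GuC sorts highest, making '>= INTEL_SUBMISSION_GUC' read as "GuC or anything newer". The declaration order is not visible in this diff, so the sketch below assumes RING < ELSP < GUC from how intel_engines_init() assigns the values further down:

#include <stdbool.h>
#include <stdio.h>

/* Assumed ordering, mirroring the assignments in intel_engines_init(). */
enum toy_submission {
	TOY_SUBMISSION_RING,	/* legacy ringbuffer */
	TOY_SUBMISSION_ELSP,	/* execlists */
	TOY_SUBMISSION_GUC,	/* GuC firmware submission */
};

struct toy_gt { enum toy_submission submission_method; };
struct toy_engine { struct toy_gt *gt; };

static bool engine_uses_guc(const struct toy_engine *engine)
{
	return engine->gt->submission_method >= TOY_SUBMISSION_GUC;
}

int main(void)
{
	struct toy_gt gt = { .submission_method = TOY_SUBMISSION_ELSP };
	struct toy_engine engine = { .gt = &gt };

	printf("uses GuC: %d\n", engine_uses_guc(&engine));	/* 0 */
	return 0;
}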
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index efe935f80c1a..9ceddfbb1687 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -45,9 +45,9 @@ struct engine_info {
unsigned int hw_id;
u8 class;
u8 instance;
- /* mmio bases table *must* be sorted in reverse gen order */
+ /* mmio bases table *must* be sorted in reverse graphics_ver order */
struct engine_mmio_base {
- u32 gen : 8;
+ u32 graphics_ver : 8;
u32 base : 24;
} mmio_bases[MAX_MMIO_BASES];
};
@@ -58,7 +58,7 @@ static const struct engine_info intel_engines[] = {
.class = RENDER_CLASS,
.instance = 0,
.mmio_bases = {
- { .gen = 1, .base = RENDER_RING_BASE }
+ { .graphics_ver = 1, .base = RENDER_RING_BASE }
},
},
[BCS0] = {
@@ -66,7 +66,7 @@ static const struct engine_info intel_engines[] = {
.class = COPY_ENGINE_CLASS,
.instance = 0,
.mmio_bases = {
- { .gen = 6, .base = BLT_RING_BASE }
+ { .graphics_ver = 6, .base = BLT_RING_BASE }
},
},
[VCS0] = {
@@ -74,9 +74,9 @@ static const struct engine_info intel_engines[] = {
.class = VIDEO_DECODE_CLASS,
.instance = 0,
.mmio_bases = {
- { .gen = 11, .base = GEN11_BSD_RING_BASE },
- { .gen = 6, .base = GEN6_BSD_RING_BASE },
- { .gen = 4, .base = BSD_RING_BASE }
+ { .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
+ { .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
+ { .graphics_ver = 4, .base = BSD_RING_BASE }
},
},
[VCS1] = {
@@ -84,8 +84,8 @@ static const struct engine_info intel_engines[] = {
.class = VIDEO_DECODE_CLASS,
.instance = 1,
.mmio_bases = {
- { .gen = 11, .base = GEN11_BSD2_RING_BASE },
- { .gen = 8, .base = GEN8_BSD2_RING_BASE }
+ { .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
+ { .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
},
},
[VCS2] = {
@@ -93,7 +93,7 @@ static const struct engine_info intel_engines[] = {
.class = VIDEO_DECODE_CLASS,
.instance = 2,
.mmio_bases = {
- { .gen = 11, .base = GEN11_BSD3_RING_BASE }
+ { .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
},
},
[VCS3] = {
@@ -101,7 +101,7 @@ static const struct engine_info intel_engines[] = {
.class = VIDEO_DECODE_CLASS,
.instance = 3,
.mmio_bases = {
- { .gen = 11, .base = GEN11_BSD4_RING_BASE }
+ { .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
},
},
[VECS0] = {
@@ -109,8 +109,8 @@ static const struct engine_info intel_engines[] = {
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 0,
.mmio_bases = {
- { .gen = 11, .base = GEN11_VEBOX_RING_BASE },
- { .gen = 7, .base = VEBOX_RING_BASE }
+ { .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
+ { .graphics_ver = 7, .base = VEBOX_RING_BASE }
},
},
[VECS1] = {
@@ -118,7 +118,7 @@ static const struct engine_info intel_engines[] = {
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 1,
.mmio_bases = {
- { .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
+ { .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
},
},
};
@@ -146,9 +146,9 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
switch (class) {
case RENDER_CLASS:
- switch (INTEL_GEN(gt->i915)) {
+ switch (GRAPHICS_VER(gt->i915)) {
default:
- MISSING_CASE(INTEL_GEN(gt->i915));
+ MISSING_CASE(GRAPHICS_VER(gt->i915));
return DEFAULT_LR_CONTEXT_RENDER_SIZE;
case 12:
case 11:
@@ -184,8 +184,8 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
*/
cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
drm_dbg(&gt->i915->drm,
- "gen%d CXT_SIZE = %d bytes [0x%08x]\n",
- INTEL_GEN(gt->i915), cxt_size * 64,
+ "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
+ GRAPHICS_VER(gt->i915), cxt_size * 64,
cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
@@ -201,7 +201,7 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
- if (INTEL_GEN(gt->i915) < 8)
+ if (GRAPHICS_VER(gt->i915) < 8)
return 0;
return GEN8_LR_CONTEXT_OTHER_SIZE;
}
@@ -213,7 +213,7 @@ static u32 __engine_mmio_base(struct drm_i915_private *i915,
int i;
for (i = 0; i < MAX_MMIO_BASES; i++)
- if (INTEL_GEN(i915) >= bases[i].gen)
+ if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
break;
GEM_BUG_ON(i == MAX_MMIO_BASES);
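Aside: the renamed mmio_bases tables only work because of the invariant called out in the struct comment: entries are sorted by descending graphics_ver, and __engine_mmio_base() returns the first entry whose version is not newer than the running platform. A standalone sketch of that lookup, modelled on the VCS0 table above (the numeric offsets are from memory and illustrative only):

#include <stdio.h>

struct mmio_base {
	unsigned int graphics_ver;
	unsigned int base;
};

/* Must stay sorted in reverse graphics_ver order. */
static const struct mmio_base vcs0_bases[] = {
	{ .graphics_ver = 11, .base = 0x1c0000 },	/* GEN11_BSD_RING_BASE */
	{ .graphics_ver = 6,  .base = 0x12000 },	/* GEN6_BSD_RING_BASE */
	{ .graphics_ver = 4,  .base = 0x4000 },		/* BSD_RING_BASE */
};

static unsigned int mmio_base_for(unsigned int ver)
{
	unsigned int i;

	/* First entry not newer than 'ver' wins, as in __engine_mmio_base(). */
	for (i = 0; i < sizeof(vcs0_bases) / sizeof(vcs0_bases[0]); i++)
		if (ver >= vcs0_bases[i].graphics_ver)
			return vcs0_bases[i].base;
	return 0;	/* engine absent on this platform */
}

int main(void)
{
	printf("ver 12 -> %#x\n", mmio_base_for(12));	/* GEN11 base */
	printf("ver 9  -> %#x\n", mmio_base_for(9));	/* GEN6 base */
	return 0;
}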
@@ -240,10 +240,10 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
* Though they added more rings on g4x/ilk, they did not add
* per-engine HWSTAM until gen6.
*/
- if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
+ if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
return;
- if (INTEL_GEN(engine->i915) >= 3)
+ if (GRAPHICS_VER(engine->i915) >= 3)
ENGINE_WRITE(engine, RING_HWSTAM, mask);
else
ENGINE_WRITE16(engine, RING_HWSTAM, mask);
@@ -255,11 +255,17 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u);
}
+static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ GEM_DEBUG_WARN_ON(iir);
+}
+
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
+ u8 guc_class;
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
@@ -288,9 +294,12 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
- engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
engine->hw_id = info->hw_id;
- engine->guc_id = MAKE_GUC_ID(info->class, info->instance);
+ guc_class = engine_class_to_guc_class(info->class);
+ engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
+ engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
+
+ engine->irq_handler = nop_irq_handler;
engine->class = info->class;
engine->instance = info->instance;
@@ -308,7 +317,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
CONFIG_DRM_I915_TIMESLICE_DURATION;
/* Override to uninterruptible for OpenCL workloads. */
- if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+ if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
engine->props.preempt_timeout_ms = 0;
engine->defaults = engine->props; /* never to change again */
@@ -345,8 +354,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
* HEVC support is present on first engine instance
* before Gen11 and on all instances afterwards.
*/
- if (INTEL_GEN(i915) >= 11 ||
- (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+ if (GRAPHICS_VER(i915) >= 11 ||
+ (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_CLASS_CAPABILITY_HEVC;
@@ -354,14 +363,14 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
* SFC block is present only on even logical engine
* instances.
*/
- if ((INTEL_GEN(i915) >= 11 &&
+ if ((GRAPHICS_VER(i915) >= 11 &&
(engine->gt->info.vdbox_sfc_access &
BIT(engine->instance))) ||
- (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+ (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
- if (INTEL_GEN(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9)
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
}
@@ -459,7 +468,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
- if (INTEL_GEN(i915) < 11)
+ if (GRAPHICS_VER(i915) < 11)
return info->engine_mask;
media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
@@ -485,7 +494,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
* hooked up to an SFC (Scaler & Format Converter) unit.
* In TGL each VDBOX has access to an SFC.
*/
- if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
+ if (GRAPHICS_VER(i915) >= 12 || logical_vdbox++ % 2 == 0)
gt->info.vdbox_sfc_access |= BIT(i);
}
drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
@@ -722,7 +731,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_whitelist(engine);
intel_engine_init_ctx_wa(engine);
- if (INTEL_GEN(engine->i915) >= 12)
+ if (GRAPHICS_VER(engine->i915) >= 12)
engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
return 0;
@@ -898,7 +907,7 @@ static int engine_init_common(struct intel_engine_cs *engine)
return 0;
err_context:
- intel_context_put(ce);
+ destroy_pinned_context(ce);
return ret;
}
@@ -909,12 +918,16 @@ int intel_engines_init(struct intel_gt *gt)
enum intel_engine_id id;
int err;
- if (intel_uc_uses_guc_submission(&gt->uc))
+ if (intel_uc_uses_guc_submission(&gt->uc)) {
+ gt->submission_method = INTEL_SUBMISSION_GUC;
setup = intel_guc_submission_setup;
- else if (HAS_EXECLISTS(gt->i915))
+ } else if (HAS_EXECLISTS(gt->i915)) {
+ gt->submission_method = INTEL_SUBMISSION_ELSP;
setup = intel_execlists_submission_setup;
- else
+ } else {
+ gt->submission_method = INTEL_SUBMISSION_RING;
setup = intel_ring_submission_setup;
+ }
for_each_engine(engine, gt, id) {
err = engine_setup_common(engine);
@@ -986,9 +999,9 @@ u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
u64 acthd;
- if (INTEL_GEN(i915) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
- else if (INTEL_GEN(i915) >= 4)
+ else if (GRAPHICS_VER(i915) >= 4)
acthd = ENGINE_READ(engine, RING_ACTHD);
else
acthd = ENGINE_READ(engine, ACTHD);
@@ -1000,7 +1013,7 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
u64 bbaddr;
- if (INTEL_GEN(engine->i915) >= 8)
+ if (GRAPHICS_VER(engine->i915) >= 8)
bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
else
bbaddr = ENGINE_READ(engine, RING_BBADDR);
@@ -1047,7 +1060,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
int err = 0;
- if (INTEL_GEN(engine->i915) < 3)
+ if (GRAPHICS_VER(engine->i915) < 3)
return -ENODEV;
ENGINE_TRACE(engine, "\n");
@@ -1097,7 +1110,7 @@ read_subslice_reg(const struct intel_engine_cs *engine,
u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
enum forcewake_domains fw_domains;
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
} else {
@@ -1146,7 +1159,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
memset(instdone, 0, sizeof(*instdone));
- switch (INTEL_GEN(i915)) {
+ switch (GRAPHICS_VER(i915)) {
default:
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
@@ -1156,7 +1169,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
- if (INTEL_GEN(i915) >= 12) {
+ if (GRAPHICS_VER(i915) >= 12) {
instdone->slice_common_extra[0] =
intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
instdone->slice_common_extra[1] =
@@ -1219,7 +1232,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
- if (INTEL_GEN(engine->i915) > 2 &&
+ if (GRAPHICS_VER(engine->i915) > 2 &&
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
@@ -1316,7 +1329,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
- switch (INTEL_GEN(engine->i915)) {
+ switch (GRAPHICS_VER(engine->i915)) {
case 2:
return false; /* uses physical not virtual addresses */
case 3:
@@ -1421,7 +1434,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct intel_engine_execlists * const execlists = &engine->execlists;
u64 addr;
- if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
+ if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
if (HAS_EXECLISTS(dev_priv)) {
drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
@@ -1438,13 +1451,13 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
- if (INTEL_GEN(engine->i915) > 2) {
+ if (GRAPHICS_VER(engine->i915) > 2) {
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
ENGINE_READ(engine, RING_MI_MODE),
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (GRAPHICS_VER(dev_priv) >= 6) {
drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
drm_printf(m, "\tRING_ESR: 0x%08x\n",
@@ -1461,15 +1474,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (INTEL_GEN(dev_priv) >= 8)
+ if (GRAPHICS_VER(dev_priv) >= 8)
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
- else if (INTEL_GEN(dev_priv) >= 4)
+ else if (GRAPHICS_VER(dev_priv) >= 4)
addr = ENGINE_READ(engine, RING_DMA_FADD);
else
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (GRAPHICS_VER(dev_priv) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
@@ -1479,7 +1492,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (intel_engine_in_guc_submission_mode(engine)) {
+ if (intel_engine_uses_guc(engine)) {
/* nothing to print yet */
} else if (HAS_EXECLISTS(dev_priv)) {
struct i915_request * const *port, *rq;
@@ -1548,7 +1561,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
rcu_read_unlock();
execlists_active_unlock_bh(execlists);
- } else if (INTEL_GEN(dev_priv) > 6) {
+ } else if (GRAPHICS_VER(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 7c9af86fdb1e..47f4397095e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -23,7 +23,7 @@ static void dbg_poison_ce(struct intel_context *ce)
if (ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
- int type = i915_coherent_map_type(ce->engine->i915);
+ int type = i915_coherent_map_type(ce->engine->i915, obj, true);
void *map;
if (!i915_gem_object_trylock(obj))
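Aside: i915_coherent_map_type() now takes the object and an always-coherent flag, since the right CPU mapping depends on the object's backing store and not just the platform. The sketch below is my reading of that selection (LLC platforms can use write-back, local-memory objects want write-combine), with toy names throughout:

#include <stdbool.h>
#include <stdio.h>

enum toy_map_type { TOY_MAP_WB, TOY_MAP_WC };

static enum toy_map_type
toy_coherent_map_type(bool has_llc, bool obj_is_lmem, bool always_coherent)
{
	if (obj_is_lmem)
		return TOY_MAP_WC;	/* lmem is never CPU-cached */
	return (has_llc || always_coherent) ? TOY_MAP_WB : TOY_MAP_WC;
}

int main(void)
{
	printf("dgfx lmem -> %s\n",
	       toy_coherent_map_type(false, true, true) == TOY_MAP_WC ?
	       "WC" : "WB");
	return 0;
}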
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 883bafc44902..e113f93b3274 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -402,6 +402,7 @@ struct intel_engine_cs {
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
void (*irq_enable)(struct intel_engine_cs *engine);
void (*irq_disable)(struct intel_engine_cs *engine);
+ void (*irq_handler)(struct intel_engine_cs *engine, u16 iir);
void (*sanitize)(struct intel_engine_cs *engine);
int (*resume)(struct intel_engine_cs *engine);
@@ -481,10 +482,9 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_HAS_TIMESLICES BIT(4)
-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
-#define I915_ENGINE_IS_VIRTUAL BIT(6)
-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
+#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
unsigned int flags;
/*
@@ -594,12 +594,6 @@ intel_engine_has_timeslices(const struct intel_engine_cs *engine)
}
static inline bool
-intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
-}
-
-static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_IS_VIRTUAL;
@@ -612,10 +606,10 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
}
#define instdone_has_slice(dev_priv___, sseu___, slice___) \
- ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
+ ((GRAPHICS_VER(dev_priv___) == 7 ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
- (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
+ (GRAPHICS_VER(dev_priv__) == 7 ? (1 & BIT(subslice__)) : \
intel_sseu_has_subslice(sseu__, 0, subslice__))
#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index de124870af44..fc77592d88a9 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -118,6 +118,7 @@
#include "intel_engine_stats.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
+#include "intel_gt_irq.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_lrc.h"
@@ -1768,7 +1769,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
*/
GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
!reset_in_progress(execlists));
- GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1847,7 +1847,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
head, upper_32_bits(csb), lower_32_bits(csb));
- if (INTEL_GEN(engine->i915) >= 12)
+ if (GRAPHICS_VER(engine->i915) >= 12)
promote = gen12_csb_parse(csb);
else
promote = gen8_csb_parse(csb);
@@ -2385,6 +2385,45 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
rcu_read_unlock();
}
+static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ bool tasklet = false;
+
+ if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+ u32 eir;
+
+ /* Upper 16b are the enabling mask, rsvd for internal errors */
+ eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
+ ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+ /* Disable the error interrupt until after the reset */
+ if (likely(eir)) {
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, eir);
+ WRITE_ONCE(engine->execlists.error_interrupt, eir);
+ tasklet = true;
+ }
+ }
+
+ if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+ WRITE_ONCE(engine->execlists.yield,
+ ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+ ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+ engine->execlists.yield);
+ if (del_timer(&engine->execlists.timer))
+ tasklet = true;
+ }
+
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
+
+ if (iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(engine);
+
+ if (tasklet)
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
static void __execlists_kick(struct intel_engine_execlists *execlists)
{
/* Kick the tasklet for some interrupt coalescing and reset handling */
@@ -2733,7 +2772,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
- if (INTEL_GEN(engine->i915) >= 11)
+ if (GRAPHICS_VER(engine->i915) >= 11)
mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
else
mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
@@ -3064,7 +3103,7 @@ static void execlists_park(struct intel_engine_cs *engine)
static bool can_preempt(struct intel_engine_cs *engine)
{
- if (INTEL_GEN(engine->i915) > 8)
+ if (GRAPHICS_VER(engine->i915) > 8)
return true;
/* GPGPU on bdw requires extra w/a; not implemented */
@@ -3076,29 +3115,6 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = execlists_submit_request;
engine->schedule = i915_schedule;
engine->execlists.tasklet.callback = execlists_submission_tasklet;
-
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.rewind = execlists_reset_rewind;
- engine->reset.cancel = execlists_reset_cancel;
- engine->reset.finish = execlists_reset_finish;
-
- engine->park = execlists_park;
- engine->unpark = NULL;
-
- engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (can_preempt(engine)) {
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
- if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- engine->flags |= I915_ENGINE_HAS_TIMESLICES;
- }
- }
-
- if (intel_engine_has_preemption(engine))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen8_emit_bb_start_noarb;
}
static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -3129,16 +3145,24 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
+ engine->reset.finish = execlists_reset_finish;
+
+ engine->park = execlists_park;
+ engine->unpark = NULL;
+
engine->emit_flush = gen8_emit_flush_xcs;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
- if (INTEL_GEN(engine->i915) >= 12) {
+ if (GRAPHICS_VER(engine->i915) >= 12) {
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
engine->emit_flush = gen12_emit_flush_xcs;
}
engine->set_default_submission = execlists_set_default_submission;
- if (INTEL_GEN(engine->i915) < 11) {
+ if (GRAPHICS_VER(engine->i915) < 11) {
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
} else {
@@ -3149,13 +3173,29 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
* until a more refined solution exists.
*/
}
+ intel_engine_set_irq_handler(engine, execlists_irq_handler);
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ if (!intel_vgpu_active(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (can_preempt(engine)) {
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
+ }
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
}
static void logical_ring_default_irqs(struct intel_engine_cs *engine)
{
unsigned int shift = 0;
- if (INTEL_GEN(engine->i915) < 11) {
+ if (GRAPHICS_VER(engine->i915) < 11) {
const u8 irq_shifts[] = {
[RCS0] = GEN8_RCS_IRQ_SHIFT,
[BCS0] = GEN8_BCS_IRQ_SHIFT,
@@ -3175,7 +3215,7 @@ static void logical_ring_default_irqs(struct intel_engine_cs *engine)
static void rcs_submission_override(struct intel_engine_cs *engine)
{
- switch (INTEL_GEN(engine->i915)) {
+ switch (GRAPHICS_VER(engine->i915)) {
case 12:
engine->emit_flush = gen12_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
@@ -3226,13 +3266,13 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
execlists->csb_write =
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
- if (INTEL_GEN(i915) < 11)
+ if (GRAPHICS_VER(i915) < 11)
execlists->csb_size = GEN8_CSB_ENTRIES;
else
execlists->csb_size = GEN11_CSB_ENTRIES;
engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
- if (INTEL_GEN(engine->i915) >= 11) {
+ if (GRAPHICS_VER(engine->i915) >= 11) {
execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
}
@@ -3884,13 +3924,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&engine->active.lock, flags);
}
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
- return engine->set_default_submission ==
- execlists_set_default_submission;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_execlists.c"
#endif
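Aside: with execlists_irq_handler() moved into the backend, the top-half GT interrupt code dispatches through the new engine->irq_handler pointer (defaulting to nop_irq_handler() and installed via intel_engine_set_irq_handler()) instead of asking which submission mode is active, which is why intel_engine_in_execlists_submission_mode() can be deleted. The shape, with stub types:

#include <stdint.h>
#include <stdio.h>

struct toy_engine;
typedef void (*toy_irq_handler)(struct toy_engine *engine, uint16_t iir);

struct toy_engine {
	const char *name;
	toy_irq_handler irq_handler;	/* per-backend, never NULL */
};

/* Default until a submission backend installs its own handler. */
static void toy_nop_irq_handler(struct toy_engine *engine, uint16_t iir)
{
	(void)engine;
	(void)iir;	/* a live IIR here would indicate a setup bug */
}

static void toy_execlists_irq_handler(struct toy_engine *engine, uint16_t iir)
{
	printf("%s: iir=%#x -> kick tasklet / signal breadcrumbs\n",
	       engine->name, iir);
}

/* Top half: no mode checks, just an indirect call. */
static void toy_gt_irq(struct toy_engine *engine, uint16_t iir)
{
	engine->irq_handler(engine, iir);
}

int main(void)
{
	struct toy_engine rcs0 = { "rcs0", toy_nop_irq_handler };

	rcs0.irq_handler = toy_execlists_irq_handler;	/* backend setup */
	toy_gt_irq(&rcs0, 0x100);
	return 0;
}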
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index fd61dae820e9..4ca9b475e252 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -43,7 +43,4 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
const struct intel_engine_cs *sibling);
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 670c1271e7d5..20e46b843324 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -18,6 +18,7 @@
#include "i915_vgpu.h"
#include "intel_gtt.h"
+#include "gen8_ppgtt.h"
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -106,10 +107,10 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
if (!intel_vtd_active())
return false;
- if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+ if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
return true;
- if (IS_GEN(i915, 12))
+ if (GRAPHICS_VER(i915) == 12)
return true; /* XXX DMAR fault reason 7 */
return false;
@@ -175,7 +176,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
- if (INTEL_GEN(i915) >= 12)
+ if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
else
@@ -187,9 +188,9 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_gtt_chipset_flush();
}
-static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
+u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen8_pte_t pte = addr | _PAGE_PRESENT;
@@ -657,7 +658,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
goto err_ppgtt;
i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
i915_gem_object_unlock(ppgtt->vm.scratch[0]);
if (err)
goto err_stash;
@@ -745,7 +746,6 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
mutex_unlock(&ggtt->vm.mutex);
i915_address_space_fini(&ggtt->vm);
- dma_resv_fini(&ggtt->vm.resv);
arch_phys_wc_del(ggtt->mtrr);
@@ -767,6 +767,19 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915)
ggtt_cleanup_hw(ggtt);
}
+/**
+ * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
+ * all free objects have been drained.
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
+ dma_resv_fini(&ggtt->vm._resv);
+}
+
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
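Aside: the vm.resv to vm._resv/vm.resv_ref split makes the GGTT's reservation object refcounted, so objects sharing it can outlive ggtt_cleanup_hw(); the new late-release hook warns if anything other than the vm's own reference survives the object drain, then finalizes. A minimal refcount sketch under those assumptions (simplified, not the kref API):

#include <assert.h>
#include <stdio.h>

struct toy_vm {
	int resv_ref;	/* stand-in for the vm.resv_ref kref */
	int resv_live;	/* stand-in for the embedded dma_resv */
};

static void toy_resv_get(struct toy_vm *vm) { vm->resv_ref++; }

static void toy_resv_put(struct toy_vm *vm)
{
	if (--vm->resv_ref == 0)
		vm->resv_live = 0;	/* dma_resv_fini() point */
}

int main(void)
{
	struct toy_vm ggtt_vm = { .resv_ref = 1, .resv_live = 1 };

	toy_resv_get(&ggtt_vm);	/* an object borrows the resv */
	toy_resv_put(&ggtt_vm);	/* object freed after cleanup_hw */

	/* i915_ggtt_driver_late_release(): only the vm's own ref remains. */
	assert(ggtt_vm.resv_ref == 1);
	toy_resv_put(&ggtt_vm);
	printf("resv finalized: %d\n", !ggtt_vm.resv_live);
	return 0;
}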
@@ -819,7 +832,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 10)
ggtt->gsm = ioremap(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -828,6 +841,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
+ kref_init(&ggtt->vm.resv_ref);
ret = setup_scratch_page(&ggtt->vm);
if (ret) {
drm_err(&i915->drm, "Scratch setup failed\n");
@@ -906,9 +920,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
- /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
- if (intel_ggtt_update_needs_vtd_wa(i915) ||
- IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
+ /*
+ * Serialize GTT updates with aperture access on BXT if VT-d is on,
+ * and always on CHV.
+ */
+ if (intel_vm_no_concurrent_access_wa(i915)) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
ggtt->vm.bind_async_flags =
@@ -1062,7 +1078,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(i915))
ggtt->vm.pte_encode = byt_pte_encode;
- else if (INTEL_GEN(i915) >= 7)
+ else if (GRAPHICS_VER(i915) >= 7)
ggtt->vm.pte_encode = ivb_pte_encode;
else
ggtt->vm.pte_encode = snb_pte_encode;
@@ -1132,16 +1148,16 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
ggtt->vm.gt = gt;
ggtt->vm.i915 = i915;
ggtt->vm.dma = i915->drm.dev;
- dma_resv_init(&ggtt->vm.resv);
+ dma_resv_init(&ggtt->vm._resv);
- if (INTEL_GEN(i915) <= 5)
+ if (GRAPHICS_VER(i915) <= 5)
ret = i915_gmch_probe(ggtt);
- else if (INTEL_GEN(i915) < 8)
+ else if (GRAPHICS_VER(i915) < 8)
ret = gen6_gmch_probe(ggtt);
else
ret = gen8_gmch_probe(ggtt);
if (ret) {
- dma_resv_fini(&ggtt->vm.resv);
+ dma_resv_fini(&ggtt->vm._resv);
return ret;
}
@@ -1193,7 +1209,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
+ if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
return -EIO;
return 0;
@@ -1258,7 +1274,7 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
if (flush)
wbinvd_on_all_cpus();
- if (INTEL_GEN(ggtt->vm.i915) >= 8)
+ if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
setup_private_pat(ggtt->vm.gt->uncore);
intel_ggtt_restore_fences(ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 8a322594210c..cac7f3f44642 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -56,7 +56,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence)
int fence_pitch_shift;
u64 val;
- if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
+ if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -173,9 +173,9 @@ static void fence_write(struct i915_fence_reg *fence)
* and explicitly managed for internal users.
*/
- if (IS_GEN(i915, 2))
+ if (GRAPHICS_VER(i915) == 2)
i830_write_fence_reg(fence);
- else if (IS_GEN(i915, 3))
+ else if (GRAPHICS_VER(i915) == 3)
i915_write_fence_reg(fence);
else
i965_write_fence_reg(fence);
@@ -188,7 +188,7 @@ static void fence_write(struct i915_fence_reg *fence)
static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
{
- return INTEL_GEN(fence_to_i915(fence)) < 4;
+ return GRAPHICS_VER(fence_to_i915(fence)) < 4;
}
static int fence_update(struct i915_fence_reg *fence,
@@ -569,7 +569,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
+ if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
@@ -579,7 +579,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_GEN(i915) >= 6) {
+ } else if (GRAPHICS_VER(i915) >= 6) {
if (i915->preserve_bios_swizzle) {
if (intel_uncore_read(uncore, DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
@@ -611,14 +611,14 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN(i915, 5)) {
+ } else if (GRAPHICS_VER(i915) == 5) {
/*
		 * On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN(i915, 2)) {
+ } else if (GRAPHICS_VER(i915) == 2) {
/*
* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
@@ -653,8 +653,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
- if (intel_uncore_read16(uncore, C0DRB3) ==
- intel_uncore_read16(uncore, C1DRB3)) {
+ if (intel_uncore_read16(uncore, C0DRB3_BW) ==
+ intel_uncore_read16(uncore, C1DRB3_BW)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
@@ -697,7 +697,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN(i915, 4) &&
+ if (GRAPHICS_VER(i915) == 4 &&
!(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -844,10 +844,10 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
if (!i915_ggtt_has_aperture(ggtt))
num_fences = 0;
- else if (INTEL_GEN(i915) >= 7 &&
+ else if (GRAPHICS_VER(i915) >= 7 &&
!(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
- else if (INTEL_GEN(i915) >= 4 ||
+ else if (GRAPHICS_VER(i915) >= 4 ||
IS_I945G(i915) || IS_I945GM(i915) ||
IS_G33(i915) || IS_PINEVIEW(i915))
num_fences = 16;
@@ -867,7 +867,7 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
- i915_active_init(&fence->active, NULL, NULL);
+ i915_active_init(&fence->active, NULL, NULL, 0);
fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
@@ -895,29 +895,29 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- if (INTEL_GEN(i915) < 5 ||
+ if (GRAPHICS_VER(i915) < 5 ||
i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
- if (IS_GEN(i915, 5))
+ if (GRAPHICS_VER(i915) == 5)
return;
intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
- if (IS_GEN(i915, 6))
+ if (GRAPHICS_VER(i915) == 6)
intel_uncore_write(uncore,
ARB_MODE,
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN(i915, 7))
+ else if (GRAPHICS_VER(i915) == 7)
intel_uncore_write(uncore,
ARB_MODE,
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN(i915, 8))
+ else if (GRAPHICS_VER(i915) == 8)
intel_uncore_write(uncore,
GAMTARBMODE,
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(GRAPHICS_VER(i915));
}
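
Editor's note: detect_bit_6_swizzle() above only reports which swizzle mode the memory controller applies; consumers then have to fold bit 6 accordingly when touching tiled pages through the CPU. A self-contained sketch of the I915_BIT_6_SWIZZLE_9_10 case, assuming the classic bit6 ^= bit9 ^ bit10 rule that libdrm-era userspace traditionally applied:

	#include <stdint.h>

	/* For I915_BIT_6_SWIZZLE_9_10, bit 6 of the linear offset is XORed
	 * with bits 9 and 10 before the access reaches memory. */
	static uint64_t swizzle_bit6_9_10(uint64_t offset)
	{
		uint64_t bit = ((offset >> 9) ^ (offset >> 10)) & 1;

		return offset ^ (bit << 6);
	}
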
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 14e2ffb6c0e5..2694dbb9967e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: MIT */
/*
- * Copyright � 2003-2018 Intel Corporation
+ * Copyright © 2003-2018 Intel Corporation
*/
#ifndef _INTEL_GPU_COMMANDS_H_
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 8d77dcbad059..2161bf01ef8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -68,8 +68,6 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
id = INTEL_REGION_LMEM;
mem->id = id;
- mem->type = INTEL_MEMORY_LOCAL;
- mem->instance = 0;
intel_memory_region_set_name(mem, "local%u", mem->instance);
@@ -115,10 +113,10 @@ static void init_unused_rings(struct intel_gt *gt)
init_unused_ring(gt, SRB1_BASE);
init_unused_ring(gt, SRB2_BASE);
init_unused_ring(gt, SRB3_BASE);
- } else if (IS_GEN(i915, 2)) {
+ } else if (GRAPHICS_VER(i915) == 2) {
init_unused_ring(gt, SRB0_BASE);
init_unused_ring(gt, SRB1_BASE);
- } else if (IS_GEN(i915, 3)) {
+ } else if (GRAPHICS_VER(i915) == 3) {
init_unused_ring(gt, PRB1_BASE);
init_unused_ring(gt, PRB2_BASE);
}
@@ -135,7 +133,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+ if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
if (IS_HASWELL(i915))
@@ -208,10 +206,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
struct intel_uncore *uncore = gt->uncore;
u32 eir;
- if (!IS_GEN(i915, 2))
+ if (GRAPHICS_VER(i915) != 2)
clear_register(uncore, PGTBL_ER);
- if (INTEL_GEN(i915) < 4)
+ if (GRAPHICS_VER(i915) < 4)
clear_register(uncore, IPEIR(RENDER_RING_BASE));
else
clear_register(uncore, IPEIR_I965);
@@ -229,13 +227,13 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
I915_MASTER_ERROR_INTERRUPT);
}
- if (INTEL_GEN(i915) >= 12) {
+ if (GRAPHICS_VER(i915) >= 12) {
rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(i915) >= 6) {
+ } else if (GRAPHICS_VER(i915) >= 6) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -273,7 +271,7 @@ static void gen8_check_faults(struct intel_gt *gt)
i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
u32 fault;
- if (INTEL_GEN(gt->i915) >= 12) {
+ if (GRAPHICS_VER(gt->i915) >= 12) {
fault_reg = GEN12_RING_FAULT_REG;
fault_data0_reg = GEN12_FAULT_TLB_DATA0;
fault_data1_reg = GEN12_FAULT_TLB_DATA1;
@@ -313,9 +311,9 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (INTEL_GEN(i915) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
gen8_check_faults(gt);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
gen6_check_faults(gt);
else
return;
@@ -367,7 +365,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
void intel_gt_chipset_flush(struct intel_gt *gt)
{
wmb();
- if (INTEL_GEN(gt->i915) < 6)
+ if (GRAPHICS_VER(gt->i915) < 6)
intel_gtt_chipset_flush();
}
@@ -591,7 +589,8 @@ int intel_gt_init(struct intel_gt *gt)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
- err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ err = intel_gt_init_scratch(gt,
+ GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
if (err)
goto out_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index c59468107598..aa0a59c5b614 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -98,7 +98,6 @@ static void pool_free_work(struct work_struct *wrk)
round_jiffies_up_relative(HZ));
}
-__i915_active_call
static void pool_retire(struct i915_active *ref)
{
struct intel_gt_buffer_pool_node *node =
@@ -154,7 +153,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz,
node->age = 0;
node->pool = pool;
node->pinned = false;
- i915_active_init(&node->active, NULL, pool_retire);
+ i915_active_init(&node->active, NULL, pool_retire, 0);
obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 582fcaee11aa..9f0e729d2d15 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -76,7 +76,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
u32 f19_2_mhz = 19200000;
u32 f24_mhz = 24000000;
- if (INTEL_GEN(uncore->i915) <= 4) {
+ if (GRAPHICS_VER(uncore->i915) <= 4) {
/*
* PRMs say:
*
@@ -85,7 +85,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
* (“CLKCFG”) MCHBAR register)
*/
return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
- } else if (INTEL_GEN(uncore->i915) <= 8) {
+ } else if (GRAPHICS_VER(uncore->i915) <= 8) {
/*
* PRMs say:
*
@@ -94,7 +94,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
* rolling over every 1.5 hours).
*/
return f12_5_mhz;
- } else if (INTEL_GEN(uncore->i915) <= 9) {
+ } else if (GRAPHICS_VER(uncore->i915) <= 9) {
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
@@ -113,7 +113,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
}
return freq;
- } else if (INTEL_GEN(uncore->i915) <= 12) {
+ } else if (GRAPHICS_VER(uncore->i915) <= 12) {
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
@@ -128,7 +128,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
} else {
u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
- if (INTEL_GEN(uncore->i915) <= 10)
+ if (GRAPHICS_VER(uncore->i915) <= 10)
freq = gen10_get_crystal_clock_freq(uncore, c0);
else
freq = gen11_get_crystal_clock_freq(uncore, c0);
@@ -211,7 +211,7 @@ u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
* frozen machine.
*/
val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
- if (IS_GEN(gt->i915, 6))
+ if (GRAPHICS_VER(gt->i915) == 6)
val = div_u64_roundup(val, 25) * 25;
return val;
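
Editor's note: read_clock_frequency() above picks the CS timestamp base frequency per platform (a fixed 12.5 MHz on gen5-8, crystal-derived 19.2/24 MHz later). Once the frequency is known, converting raw timestamp ticks is a straight scale; a hedged sketch (the helper name is illustrative, not an i915 function):

	#include <stdint.h>

	/* Scale raw CS timestamp ticks to nanoseconds. Beware 64-bit
	 * overflow for very large tick counts; the kernel side uses
	 * mul_u64_u32_div()-style helpers to avoid it. */
	static uint64_t ticks_to_ns(uint64_t ticks, uint32_t freq_hz)
	{
		return ticks * 1000000000ull / freq_hz;
	}
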
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 9fc6c912a4e5..c13462274fe8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -20,48 +20,6 @@ static void guc_irq_handler(struct intel_guc *guc, u16 iir)
intel_guc_to_host_event_handler(guc);
}
-static void
-cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
-{
- bool tasklet = false;
-
- if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
- u32 eir;
-
- /* Upper 16b are the enabling mask, rsvd for internal errors */
- eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
- ENGINE_TRACE(engine, "CS error: %x\n", eir);
-
- /* Disable the error interrupt until after the reset */
- if (likely(eir)) {
- ENGINE_WRITE(engine, RING_EMR, ~0u);
- ENGINE_WRITE(engine, RING_EIR, eir);
- WRITE_ONCE(engine->execlists.error_interrupt, eir);
- tasklet = true;
- }
- }
-
- if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
- WRITE_ONCE(engine->execlists.yield,
- ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
- ENGINE_TRACE(engine, "semaphore yield: %08x\n",
- engine->execlists.yield);
- if (del_timer(&engine->execlists.timer))
- tasklet = true;
- }
-
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
- tasklet = true;
-
- if (iir & GT_RENDER_USER_INTERRUPT) {
- intel_engine_signal_breadcrumbs(engine);
- tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
- }
-
- if (tasklet)
- tasklet_hi_schedule(&engine->execlists.tasklet);
-}
-
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
@@ -122,7 +80,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
engine = NULL;
if (likely(engine))
- return cs_irq_handler(engine, iir);
+ return intel_engine_cs_irq(engine, iir);
WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
class, instance);
@@ -236,14 +194,18 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
- const u32 irqs =
- GT_CS_MASTER_ERROR_INTERRUPT |
- GT_RENDER_USER_INTERRUPT |
- GT_CONTEXT_SWITCH_INTERRUPT |
- GT_WAIT_SEMAPHORE_INTERRUPT;
struct intel_uncore *uncore = gt->uncore;
- const u32 dmask = irqs << 16 | irqs;
- const u32 smask = irqs << 16;
+ u32 irqs = GT_RENDER_USER_INTERRUPT;
+ u32 dmask;
+ u32 smask;
+
+ if (!intel_uc_wants_guc_submission(&gt->uc))
+ irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
+
+ dmask = irqs << 16 | irqs;
+ smask = irqs << 16;
BUILD_BUG_ON(irqs & 0xffff0000);
@@ -275,9 +237,12 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ gt_iir);
+
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir);
}
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
@@ -301,11 +266,16 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ gt_iir);
+
if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir >> 12);
+
if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
+ gt_iir >> 22);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -324,10 +294,10 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
iir = raw_reg_read(regs, GEN8_GT_IIR(0));
if (likely(iir)) {
- cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
- iir >> GEN8_RCS_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
- iir >> GEN8_BCS_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ iir >> GEN8_RCS_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
+ iir >> GEN8_BCS_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(0), iir);
}
}
@@ -335,10 +305,10 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
iir = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(iir)) {
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
- iir >> GEN8_VCS0_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
- iir >> GEN8_VCS1_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ iir >> GEN8_VCS0_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ iir >> GEN8_VCS1_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(1), iir);
}
}
@@ -346,8 +316,8 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
if (master_ctl & GEN8_GT_VECS_IRQ) {
iir = raw_reg_read(regs, GEN8_GT_IIR(3));
if (likely(iir)) {
- cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
- iir >> GEN8_VECS_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ iir >> GEN8_VECS_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(3), iir);
}
}
@@ -429,7 +399,7 @@ void gen5_gt_irq_reset(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
GEN3_IRQ_RESET(uncore, GT);
- if (INTEL_GEN(gt->i915) >= 6)
+ if (GRAPHICS_VER(gt->i915) >= 6)
GEN3_IRQ_RESET(uncore, GEN6_PM);
}
@@ -447,14 +417,14 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN(gt->i915, 5))
+ if (GRAPHICS_VER(gt->i915) == 5)
gt_irqs |= ILK_BSD_USER_INTERRUPT;
else
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
- if (INTEL_GEN(gt->i915) >= 6) {
+ if (GRAPHICS_VER(gt->i915) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
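
Editor's note: the gen11 postinstall rework above relies on the GEN8+ register layout where each GT IIR/IMR/IER register serves an engine pair, one engine in the low 16 bits and one in the high 16 bits. A sketch of the mask packing used there (helper name hypothetical):

	#include <stdint.h>

	/* Replicate the per-engine interrupt bits into both 16-bit halves
	 * for a register shared by two engines (dmask), or only into the
	 * upper half when the lower slot is unused (smask). */
	static void pack_gt_irq_masks(uint32_t irqs, uint32_t *dmask, uint32_t *smask)
	{
		*dmask = irqs << 16 | irqs;
		*smask = irqs << 16;
	}
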
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
index f667e976fb2b..41cad38668c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#include "intel_engine_types.h"
+
struct intel_gt;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
@@ -39,4 +41,25 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl);
void gen8_gt_irq_reset(struct intel_gt *gt);
void gen8_gt_irq_postinstall(struct intel_gt *gt);
+static inline void intel_engine_cs_irq(struct intel_engine_cs *engine, u16 iir)
+{
+ if (iir)
+ engine->irq_handler(engine, iir);
+}
+
+static inline void
+intel_engine_set_irq_handler(struct intel_engine_cs *engine,
+ void (*fn)(struct intel_engine_cs *engine,
+ u16 iir))
+{
+ /*
+	 * As the interrupt is live while we allocate and set up the
+	 * engines, err on the side of caution and apply barriers when
+	 * updating the irq handler callback. This ensures that once we
+	 * use the engine, we receive interrupts only for ourselves, and
+	 * do not lose any.
+ */
+ smp_store_mb(engine->irq_handler, fn);
+}
+
#endif /* INTEL_GT_IRQ_H */
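
Editor's note: smp_store_mb() above publishes the handler with a full barrier so a live interrupt can never observe a half-constructed engine through the new callback. A rough userspace analogue with C11 atomics (illustrative only; in the kernel, reader-side ordering also comes from interrupt entry/exit):

	#include <stdatomic.h>
	#include <stddef.h>

	typedef void (*irq_fn)(void *engine, unsigned short iir);

	static _Atomic(irq_fn) irq_handler;

	/* Publisher: full barrier, mirroring smp_store_mb(). */
	static void set_irq_handler(irq_fn fn)
	{
		atomic_store_explicit(&irq_handler, fn, memory_order_seq_cst);
	}

	/* Consumer: analogous to intel_engine_cs_irq() above. */
	static void cs_irq(void *engine, unsigned short iir)
	{
		irq_fn fn = atomic_load_explicit(&irq_handler, memory_order_acquire);

		if (iir && fn)
			fn(engine, iir);
	}
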
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
index 811a11ed181c..fe51f894b073 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -16,10 +16,10 @@ static void write_pm_imr(struct intel_gt *gt)
u32 mask = gt->pm_imr;
i915_reg_t reg;
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
mask <<= 16; /* pm is in upper half */
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
reg = GEN8_GT_IMR(2);
} else {
reg = GEN6_PMIMR;
@@ -61,7 +61,7 @@ void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
{
struct intel_uncore *uncore = gt->uncore;
- i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+ i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
lockdep_assert_held(&gt->irq_lock);
@@ -77,10 +77,10 @@ static void write_pm_ier(struct intel_gt *gt)
u32 mask = gt->pm_ier;
i915_reg_t reg;
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
mask <<= 16; /* pm is in upper half */
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
reg = GEN8_GT_IER(2);
} else {
reg = GEN6_PMIER;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 0caf6ca0a784..fecfacf551d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -31,6 +31,12 @@ struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;
+enum intel_submission_method {
+ INTEL_SUBMISSION_RING,
+ INTEL_SUBMISSION_ELSP,
+ INTEL_SUBMISSION_GUC,
+};
+
struct intel_gt {
struct drm_i915_private *i915;
struct intel_uncore *uncore;
@@ -118,6 +124,7 @@ struct intel_gt {
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
+ enum intel_submission_method submission_method;
/*
* Default address space (either GGTT or ppGTT depending on arch).
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 941f8af016d6..084ea65d59c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -7,10 +7,29 @@
#include <linux/fault-inject.h>
+#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
+struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_lmem(vm->i915, sz, 0);
+ /*
+ * Ensure all paging structures for this vm share the same dma-resv
+ * object underneath, with the idea that one object_lock() will lock
+ * them all at once.
+ */
+ if (!IS_ERR(obj)) {
+ obj->base.resv = i915_vm_resv_get(vm);
+ obj->shares_resv_from = vm;
+ }
+
+ return obj;
+}
+
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
struct drm_i915_gem_object *obj;
@@ -19,33 +38,42 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
i915_gem_shrink_all(vm->i915);
obj = i915_gem_object_create_internal(vm->i915, sz);
- /* ensure all dma objects have the same reservation class */
- if (!IS_ERR(obj))
- obj->base.resv = &vm->resv;
+ /*
+ * Ensure all paging structures for this vm share the same dma-resv
+ * object underneath, with the idea that one object_lock() will lock
+ * them all at once.
+ */
+ if (!IS_ERR(obj)) {
+ obj->base.resv = i915_vm_resv_get(vm);
+ obj->shares_resv_from = vm;
+ }
+
return obj;
}
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ enum i915_map_type type;
+ void *vaddr;
- i915_gem_object_lock(obj, NULL);
- err = i915_gem_object_pin_pages(obj);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
+ type = i915_coherent_map_type(vm->i915, obj, true);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, type);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ enum i915_map_type type;
+ void *vaddr;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ type = i915_coherent_map_type(vm->i915, obj, true);
+ vaddr = i915_gem_object_pin_map(obj, type);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
@@ -80,7 +108,7 @@ void __i915_vm_close(struct i915_address_space *vm)
int i915_vm_lock_objects(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww)
{
- if (vm->scratch[0]->base.resv == &vm->resv) {
+ if (vm->scratch[0]->base.resv == &vm->_resv) {
return i915_gem_object_lock(vm->scratch[0], ww);
} else {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
@@ -96,6 +124,22 @@ void i915_address_space_fini(struct i915_address_space *vm)
mutex_destroy(&vm->mutex);
}
+/**
+ * i915_vm_resv_release - Final struct i915_address_space destructor
+ * @kref: Pointer to the &i915_address_space.resv_ref member.
+ *
+ * This function is called when the last lock sharer no longer shares the
+ * &i915_address_space._resv lock.
+ */
+void i915_vm_resv_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, typeof(*vm), resv_ref);
+
+ dma_resv_fini(&vm->_resv);
+ kfree(vm);
+}
+
static void __i915_vm_release(struct work_struct *work)
{
struct i915_address_space *vm =
@@ -103,9 +147,8 @@ static void __i915_vm_release(struct work_struct *work)
vm->cleanup(vm);
i915_address_space_fini(vm);
- dma_resv_fini(&vm->resv);
- kfree(vm);
+ i915_vm_resv_put(vm);
}
void i915_vm_release(struct kref *kref)
@@ -122,6 +165,14 @@ void i915_vm_release(struct kref *kref)
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
+
+ /*
+	 * Special case for the GGTT, which has already done an early
+	 * kref_init of its resv_ref before this point.
+ */
+ if (!kref_read(&vm->resv_ref))
+ kref_init(&vm->resv_ref);
+
INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
atomic_set(&vm->open, 1);
@@ -132,8 +183,23 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
*/
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
- i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
- dma_resv_init(&vm->resv);
+
+ if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+ } else {
+ /*
+	 * The CHV + BXT VT-d workaround uses stop_machine(),
+	 * which is allowed to allocate memory. This means &vm->mutex
+	 * is the outer lock, and in theory we can allocate memory
+	 * inside it through stop_machine().
+	 *
+	 * Add the lockdep annotation for this; the shrinker only uses
+	 * trylock on this mutex.
+ */
+ mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
+ might_alloc(GFP_KERNEL);
+ mutex_release(&vm->mutex.dep_map, _THIS_IP_);
+ }
+ dma_resv_init(&vm->_resv);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
@@ -155,6 +221,14 @@ void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
+void *__px_vaddr(struct drm_i915_gem_object *p)
+{
+ enum i915_map_type type;
+
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return page_unpack_bits(p->mm.mapping, &type);
+}
+
dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
@@ -170,32 +244,22 @@ struct page *__px_page(struct drm_i915_gem_object *p)
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
- struct page *page = __px_page(p);
- void *vaddr;
+ void *vaddr = __px_vaddr(p);
- vaddr = kmap(page);
memset64(vaddr, val, count);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap(page);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
- struct sgt_iter sgt;
- struct page *page;
+ void *vaddr = __px_vaddr(scratch);
u8 val;
val = 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
val = POISON_FREE;
- for_each_sgt_page(page, sgt, scratch->mm.pages) {
- void *vaddr;
-
- vaddr = kmap(page);
- memset(vaddr, val, PAGE_SIZE);
- kunmap(page);
- }
+ memset(vaddr, val, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
@@ -225,7 +289,7 @@ int setup_scratch_page(struct i915_address_space *vm)
if (IS_ERR(obj))
goto skip;
- if (pin_pt_dma(vm, obj))
+ if (map_pt_dma(vm, obj))
goto skip_obj;
/* We need a single contiguous page for our scratch */
@@ -292,7 +356,7 @@ void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
+ else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
@@ -309,13 +373,13 @@ void gtt_write_workarounds(struct intel_gt *gt)
* driver.
*/
if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
- INTEL_GEN(i915) <= 10)
+ GRAPHICS_VER(i915) <= 10)
intel_uncore_rmw(uncore,
GEN8_GAMW_ECO_DEV_RW_IA,
0,
GAMW_ECO_ENABLE_64K_IPS_FIELD);
- if (IS_GEN_RANGE(i915, 8, 11)) {
+ if (IS_GRAPHICS_VER(i915, 8, 11)) {
bool can_use_gtt_cache = true;
/*
@@ -397,7 +461,7 @@ static void bdw_setup_private_ppat(struct intel_uncore *uncore)
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
/* for scanout with eLLC */
- if (INTEL_GEN(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9)
pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
else
pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
@@ -446,11 +510,11 @@ void setup_private_pat(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
- if (INTEL_GEN(i915) >= 12)
+ if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
- else if (INTEL_GEN(i915) >= 10)
+ else if (GRAPHICS_VER(i915) >= 10)
cnl_setup_private_ppat(uncore);
else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
chv_setup_private_ppat(uncore);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index e67e34e17913..edea95b97c36 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -180,6 +180,9 @@ struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))
+void *__px_vaddr(struct drm_i915_gem_object *p);
+#define px_vaddr(px) (__px_vaddr(px_base(px)))
+
#define px_pt(px) \
__px_choose_expr(px, struct i915_page_table *, __x, \
__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
@@ -242,9 +245,12 @@ struct i915_address_space {
atomic_t open;
struct mutex mutex; /* protects vma and our lists */
- struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
+
+ struct kref resv_ref; /* kref to keep the reservation lock alive. */
+ struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
+#define VM_CLASS_DPT 2
struct drm_i915_gem_object *scratch[4];
/**
@@ -255,6 +261,9 @@ struct i915_address_space {
/* Global GTT */
bool is_ggtt:1;
+ /* Display page table */
+ bool is_dpt:1;
+
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
@@ -351,6 +360,8 @@ struct i915_ppgtt {
};
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))
int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
@@ -385,7 +396,7 @@ static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
- GEM_BUG_ON(i915_is_ggtt(vm));
+ GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
return container_of(vm, struct i915_ppgtt, vm);
}
@@ -396,13 +407,36 @@ i915_vm_get(struct i915_address_space *vm)
return vm;
}
+/**
+ * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
+ * @vm: The vm whose reservation lock we want to share.
+ *
+ * Return: A pointer to the vm's reservation lock.
+ */
+static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
+{
+ kref_get(&vm->resv_ref);
+ return &vm->_resv;
+}
+
void i915_vm_release(struct kref *kref);
+void i915_vm_resv_release(struct kref *kref);
+
static inline void i915_vm_put(struct i915_address_space *vm)
{
kref_put(&vm->ref, i915_vm_release);
}
+/**
+ * i915_vm_resv_put - Release a reference on the vm's reservation lock
+ * @resv: Pointer to a reservation lock obtained from i915_vm_resv_get()
+ */
+static inline void i915_vm_resv_put(struct i915_address_space *vm)
+{
+ kref_put(&vm->resv_ref, i915_vm_resv_release);
+}
+
static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
@@ -498,6 +532,7 @@ void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
+void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
@@ -511,8 +546,6 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px)))
-
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
@@ -526,12 +559,13 @@ int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
+struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
void free_px(struct i915_address_space *vm,
struct i915_page_table *pt, int lvl);
@@ -578,7 +612,7 @@ void setup_private_pat(struct intel_uncore *uncore);
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
u64 size);
-int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
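
Editor's note: the resv_ref/_resv split above decouples two lifetimes: the vm itself (vm->ref) and the reservation lock embedded in it, which every page-table object borrows via i915_vm_resv_get(). A stripped-down, single-threaded sketch of the idea (the real code uses kref and frees in i915_vm_resv_release()):

	#include <stdlib.h>

	struct vm {
		int ref;	/* vm lifetime */
		int resv_ref;	/* lock lifetime; allocation lives until this hits 0 */
		/* struct dma_resv _resv; -- the embedded shared lock in i915 */
	};

	/* Each lock sharer pins the allocation, cf. i915_vm_resv_get(). */
	static struct vm *vm_resv_get(struct vm *vm)
	{
		vm->resv_ref++;
		return vm;
	}

	/* The last sharer frees, cf. i915_vm_resv_release(). */
	static void vm_resv_put(struct vm *vm)
	{
		if (--vm->resv_ref == 0)
			free(vm);
	}
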
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 075d741644ae..eb1a15deed22 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -64,7 +64,7 @@ static bool get_ia_constants(struct intel_llc *llc,
consts->min_gpu_freq = rps->min_freq;
consts->max_gpu_freq = rps->max_freq;
- if (INTEL_GEN(i915) >= 9) {
+ if (GRAPHICS_VER(i915) >= 9) {
 		/* Convert GT frequency to units of 50 MHz */
consts->min_gpu_freq /= GEN9_FREQ_SCALER;
consts->max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -83,13 +83,13 @@ static void calc_ia_freq(struct intel_llc *llc,
const int diff = consts->max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (INTEL_GEN(i915) >= 9) {
+ if (GRAPHICS_VER(i915) >= 9) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
*/
ring_freq = gpu_freq;
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(consts->min_ring_freq, gpu_freq);
} else if (IS_HASWELL(i915)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e86897cde984..a27bac0a4bfb 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -47,7 +47,7 @@ static void set_offsets(u32 *regs,
*regs = MI_LOAD_REGISTER_IMM(count);
if (flags & POSTED)
*regs |= MI_LRI_FORCE_POSTED;
- if (INTEL_GEN(engine->i915) >= 11)
+ if (GRAPHICS_VER(engine->i915) >= 11)
*regs |= MI_LRI_LRM_CS_MMIO;
regs++;
@@ -70,7 +70,7 @@ static void set_offsets(u32 *regs,
if (close) {
/* Close the batch; used mainly by live_lrc_layout() */
*regs = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(engine->i915) >= 10)
+ if (GRAPHICS_VER(engine->i915) >= 10)
*regs |= BIT(0);
}
}
@@ -498,22 +498,22 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
* addressing to automatic fixup the register state between the
* physical engines for virtual engine.
*/
- GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
+ GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&
!intel_engine_has_relative_mmio(engine));
if (engine->class == RENDER_CLASS) {
- if (INTEL_GEN(engine->i915) >= 12)
+ if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_rcs_offsets;
- else if (INTEL_GEN(engine->i915) >= 11)
+ else if (GRAPHICS_VER(engine->i915) >= 11)
return gen11_rcs_offsets;
- else if (INTEL_GEN(engine->i915) >= 9)
+ else if (GRAPHICS_VER(engine->i915) >= 9)
return gen9_rcs_offsets;
else
return gen8_rcs_offsets;
} else {
- if (INTEL_GEN(engine->i915) >= 12)
+ if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_xcs_offsets;
- else if (INTEL_GEN(engine->i915) >= 9)
+ else if (GRAPHICS_VER(engine->i915) >= 9)
return gen9_xcs_offsets;
else
return gen8_xcs_offsets;
@@ -522,9 +522,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- if (INTEL_GEN(engine->i915) >= 12)
+ if (GRAPHICS_VER(engine->i915) >= 12)
return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
+ else if (GRAPHICS_VER(engine->i915) >= 9)
return 0x54;
else if (engine->class == RENDER_CLASS)
return 0x58;
@@ -534,9 +534,9 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
- if (INTEL_GEN(engine->i915) >= 12)
+ if (GRAPHICS_VER(engine->i915) >= 12)
return 0x74;
- else if (INTEL_GEN(engine->i915) >= 9)
+ else if (GRAPHICS_VER(engine->i915) >= 9)
return 0x68;
else if (engine->class == RENDER_CLASS)
return 0xd8;
@@ -546,9 +546,9 @@ static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
{
- if (INTEL_GEN(engine->i915) >= 12)
+ if (GRAPHICS_VER(engine->i915) >= 12)
return 0x12;
- else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+ else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS)
return 0x18;
else
return -1;
@@ -581,9 +581,9 @@ static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
if (engine->class != RENDER_CLASS)
return -1;
- if (INTEL_GEN(engine->i915) >= 12)
+ if (GRAPHICS_VER(engine->i915) >= 12)
return 0xb6;
- else if (INTEL_GEN(engine->i915) >= 11)
+ else if (GRAPHICS_VER(engine->i915) >= 11)
return 0xaa;
else
return -1;
@@ -592,9 +592,9 @@ static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
static u32
lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
{
- switch (INTEL_GEN(engine->i915)) {
+ switch (GRAPHICS_VER(engine->i915)) {
default:
- MISSING_CASE(INTEL_GEN(engine->i915));
+ MISSING_CASE(GRAPHICS_VER(engine->i915));
fallthrough;
case 12:
return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@@ -637,7 +637,7 @@ static void init_common_regs(u32 * const regs,
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (inhibit)
ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
- if (INTEL_GEN(engine->i915) < 11)
+ if (GRAPHICS_VER(engine->i915) < 11)
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
regs[CTX_CONTEXT_CONTROL] = ctl;
@@ -805,7 +805,7 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
- if (INTEL_GEN(engine->i915) == 12) {
+ if (GRAPHICS_VER(engine->i915) == 12) {
ce->wa_bb_page = context_size / PAGE_SIZE;
context_size += PAGE_SIZE;
}
@@ -903,7 +903,9 @@ lrc_pre_pin(struct intel_context *ce,
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
*vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915) |
+ i915_coherent_map_type(ce->engine->i915,
+ ce->state->obj,
+ false) |
I915_MAP_OVERRIDE);
return PTR_ERR_OR_ZERO(*vaddr);
@@ -1112,7 +1114,7 @@ static u32 lrc_descriptor(const struct intel_context *ce)
desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
- if (IS_GEN(ce->vm->i915, 8))
+ if (GRAPHICS_VER(ce->vm->i915) == 8)
desc |= GEN8_CTX_L3LLC_COHERENT;
return i915_ggtt_offset(ce->state) | desc;
@@ -1467,7 +1469,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
if (engine->class != RENDER_CLASS)
return;
- switch (INTEL_GEN(engine->i915)) {
+ switch (GRAPHICS_VER(engine->i915)) {
case 12:
case 11:
return;
@@ -1484,7 +1486,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
wa_bb_fn[1] = NULL;
break;
default:
- MISSING_CASE(INTEL_GEN(engine->i915));
+ MISSING_CASE(GRAPHICS_VER(engine->i915));
return;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index b14138fd505c..17848807f111 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -344,11 +344,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- } else if (INTEL_GEN(i915) >= 12) {
+ } else if (GRAPHICS_VER(i915) >= 12) {
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- } else if (IS_GEN(i915, 11)) {
+ } else if (GRAPHICS_VER(i915) == 11) {
table->size = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
@@ -361,7 +361,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
} else {
- drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9,
+ drm_WARN_ONCE(&i915->drm, GRAPHICS_VER(i915) >= 9,
"Platform that should have a MOCS table does not.\n");
return 0;
}
@@ -370,7 +370,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
return 0;
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
- if (IS_GEN(i915, 9)) {
+ if (GRAPHICS_VER(i915) == 9) {
int i;
for (i = 0; i < table->size; i++)
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 014ae8ac4480..886060f7e6fc 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -87,11 +87,10 @@ write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
{
- u64 * const vaddr = kmap_atomic(__px_page(pdma));
+ u64 * const vaddr = __px_vaddr(pdma);
vaddr[idx] = encoded_entry;
clflush_cache_range(&vaddr[idx], sizeof(u64));
- kunmap_atomic(vaddr);
}
void
@@ -147,9 +146,9 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
gtt_write_workarounds(gt);
- if (IS_GEN(i915, 6))
+ if (GRAPHICS_VER(i915) == 6)
gen6_ppgtt_enable(gt);
- else if (IS_GEN(i915, 7))
+ else if (GRAPHICS_VER(i915) == 7)
gen7_ppgtt_enable(gt);
return 0;
@@ -158,7 +157,7 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt)
{
- if (INTEL_GEN(gt->i915) < 8)
+ if (GRAPHICS_VER(gt->i915) < 8)
return gen6_ppgtt_create(gt);
else
return gen8_ppgtt_create(gt);
@@ -258,7 +257,7 @@ int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
return 0;
}
-int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash)
{
struct i915_page_table *pt;
@@ -266,7 +265,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
for (pt = stash->pt[n]; pt; pt = pt->stash) {
- err = pin_pt_dma_locked(vm, pt->base);
+ err = map_pt_dma_locked(vm, pt->base);
if (err)
return err;
}
@@ -308,7 +307,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
ppgtt->vm.dma = i915->drm.dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
- dma_resv_init(&ppgtt->vm.resv);
+ dma_resv_init(&ppgtt->vm._resv);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
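
Editor's note: write_dma_entry() above drops the kmap_atomic()/kunmap_atomic() pair because map_pt_dma() now leaves every page-table page with a persistent kernel mapping, retrievable via __px_vaddr(). The hot path shrinks to a plain store plus a cache flush; a hedged sketch of the resulting shape:

	#include <stdint.h>

	/* Before: kmap_atomic(page), store, kunmap_atomic() on every write.
	 * After: the page keeps its mapping from map_pt_dma(), so writing a
	 * PTE is a plain store; the kernel version then calls
	 * clflush_cache_range(&vaddr[idx], sizeof(u64)). */
	static void write_pte(uint64_t *vaddr, unsigned short idx, uint64_t encoded)
	{
		vaddr[idx] = encoded;
	}
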
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 3b7e62debe7e..259d7eb4e165 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -109,7 +109,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
- if (INTEL_GEN(gt->i915) >= 12) {
+ if (GRAPHICS_VER(gt->i915) >= 12) {
for (i = 0; i < I915_MAX_VCS; i++)
if (HAS_ENGINE(gt, _VCS(i)))
pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
@@ -126,7 +126,7 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
enum intel_engine_id id;
/* 2b: Program RC6 thresholds.*/
- if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
+ if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 10) {
set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
} else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
@@ -249,9 +249,9 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
rc6vids = 0;
ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
&rc6vids, NULL);
- if (IS_GEN(i915, 6) && ret) {
+ if (GRAPHICS_VER(i915) == 6 && ret) {
drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
- } else if (IS_GEN(i915, 6) &&
+ } else if (GRAPHICS_VER(i915) == 6 &&
(GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
drm_dbg(&i915->drm,
"You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
@@ -515,7 +515,7 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
struct intel_uncore *uncore = rc6_to_uncore(rc6);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- if (INTEL_GEN(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9)
set(uncore, GEN9_PG_ENABLE, 0);
set(uncore, GEN6_RC_CONTROL, 0);
set(uncore, GEN6_RC_STATE, 0);
@@ -575,13 +575,13 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
chv_rc6_enable(rc6);
else if (IS_VALLEYVIEW(i915))
vlv_rc6_enable(rc6);
- else if (INTEL_GEN(i915) >= 11)
+ else if (GRAPHICS_VER(i915) >= 11)
gen11_rc6_enable(rc6);
- else if (INTEL_GEN(i915) >= 9)
+ else if (GRAPHICS_VER(i915) >= 9)
gen9_rc6_enable(rc6);
else if (IS_BROADWELL(i915))
gen8_rc6_enable(rc6);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
gen6_rc6_enable(rc6);
rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index be6f2c8f5184..f7366b054f8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -5,6 +5,8 @@
#include "i915_drv.h"
#include "intel_memory_region.h"
+#include "intel_region_lmem.h"
+#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "intel_region_lmem.h"
@@ -66,9 +68,9 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
static void
region_lmem_release(struct intel_memory_region *mem)
{
- release_fake_lmem_bar(mem);
+ intel_region_ttm_fini(mem);
io_mapping_fini(&mem->iomap);
- intel_memory_region_release_buddy(mem);
+ release_fake_lmem_bar(mem);
}
static int
@@ -83,12 +85,21 @@ region_lmem_init(struct intel_memory_region *mem)
if (!io_mapping_init_wc(&mem->iomap,
mem->io_start,
- resource_size(&mem->region)))
- return -EIO;
+ resource_size(&mem->region))) {
+ ret = -EIO;
+ goto out_no_io;
+ }
- ret = intel_memory_region_init_buddy(mem);
+ ret = intel_region_ttm_init(mem);
if (ret)
- io_mapping_fini(&mem->iomap);
+ goto out_no_buddy;
+
+ return 0;
+
+out_no_buddy:
+ io_mapping_fini(&mem->iomap);
+out_no_io:
+ release_fake_lmem_bar(mem);
return ret;
}
@@ -127,6 +138,8 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt)
mappable_end,
PAGE_SIZE,
io_start,
+ INTEL_MEMORY_LOCAL,
+ 0,
&intel_region_lmem_ops);
if (!IS_ERR(mem)) {
drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
@@ -177,7 +190,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
resource_size_t io_start;
resource_size_t lmem_size;
@@ -198,6 +211,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size,
I915_GTT_PAGE_SIZE_4K,
io_start,
+ INTEL_MEMORY_LOCAL,
+ 0,
&intel_region_lmem_ops);
if (IS_ERR(mem))
return mem;
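
Editor's note: the region_lmem_init() rework above converts the early returns into the usual goto-unwind ladder, tearing down in exact reverse order of setup (intel_region_ttm_fini, io_mapping_fini, release_fake_lmem_bar in the release path). A generic sketch of the idiom; the step names here are hypothetical stand-ins, not i915 functions:

	/* Hypothetical helpers standing in for the real setup/teardown steps. */
	static int setup_bar(void);
	static int setup_iomap(void);
	static int setup_ttm(void);
	static void teardown_bar(void);
	static void teardown_iomap(void);

	static int init_region(void)
	{
		int ret;

		ret = setup_bar();
		if (ret)
			return ret;

		ret = setup_iomap();
		if (ret)
			goto out_no_io;

		ret = setup_ttm();
		if (ret)
			goto out_no_ttm;

		return 0;

	out_no_ttm:
		teardown_iomap();	/* unwind in reverse order of setup */
	out_no_io:
		teardown_bar();
		return ret;
	}
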
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index b03e197b1d99..b575cd6e0b7a 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -15,7 +15,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
if (engine->class != RENDER_CLASS)
return NULL;
- switch (INTEL_GEN(engine->i915)) {
+ switch (GRAPHICS_VER(engine->i915)) {
case 6:
return &gen6_null_state;
case 7:
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index a377c4588aaa..72251638d4ea 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -338,15 +338,69 @@ static int gen6_reset_engines(struct intel_gt *gt,
return gen6_hw_domain_reset(gt, hw_mask);
}
-static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
+static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
+{
+ int vecs_id;
+
+ GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
+
+ vecs_id = _VECS((engine->instance) / 2);
+
+ return engine->gt->engine[vecs_id];
+}
+
+struct sfc_lock_data {
+ i915_reg_t lock_reg;
+ i915_reg_t ack_reg;
+ i915_reg_t usage_reg;
+ u32 lock_bit;
+ u32 ack_bit;
+ u32 usage_bit;
+ u32 reset_bit;
+};
+
+static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
+ struct sfc_lock_data *sfc_lock)
+{
+ switch (engine->class) {
+ default:
+ MISSING_CASE(engine->class);
+ fallthrough;
+ case VIDEO_DECODE_CLASS:
+ sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine);
+ sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+
+ sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+
+ sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
+ sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
+
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+ sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine);
+ sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+
+ sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine);
+ sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+
+ sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine);
+ sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
+ sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
+
+ break;
+ }
+}
+
+static int gen11_lock_sfc(struct intel_engine_cs *engine,
+ u32 *reset_mask,
+ u32 *unlock_mask)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
- i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
- u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
- i915_reg_t sfc_usage;
- u32 sfc_usage_bit;
- u32 sfc_reset_bit;
+ struct sfc_lock_data sfc_lock;
+ bool lock_obtained, lock_to_other = false;
int ret;
switch (engine->class) {
@@ -354,53 +408,72 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
return 0;
- sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
- sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
-
- sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
- sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+ fallthrough;
+ case VIDEO_ENHANCEMENT_CLASS:
+ get_sfc_forced_lock_data(engine, &sfc_lock);
- sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
- sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
- sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
break;
+ default:
+ return 0;
+ }
- case VIDEO_ENHANCEMENT_CLASS:
- sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
- sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+ if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
+ struct intel_engine_cs *paired_vecs;
- sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
- sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+ if (engine->class != VIDEO_DECODE_CLASS ||
+ GRAPHICS_VER(engine->i915) != 12)
+ return 0;
- sfc_usage = GEN11_VECS_SFC_USAGE(engine);
- sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
- sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
- break;
+ /*
+ * Wa_14010733141
+ *
+ * If the VCS-MFX isn't using the SFC, we also need to check
+ * whether VCS-HCP is using it. If so, we need to issue a *VE*
+ * forced lock on the VE engine that shares the same SFC.
+ */
+ if (!(intel_uncore_read_fw(uncore,
+ GEN12_HCP_SFC_LOCK_STATUS(engine)) &
+ GEN12_HCP_SFC_USAGE_BIT))
+ return 0;
- default:
- return 0;
+ paired_vecs = find_sfc_paired_vecs_engine(engine);
+ get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
+ lock_to_other = true;
+ *unlock_mask |= paired_vecs->mask;
+ } else {
+ *unlock_mask |= engine->mask;
}
/*
- * If the engine is using a SFC, tell the engine that a software reset
+ * If the engine is using an SFC, tell the engine that a software reset
* is going to happen. The engine will then try to force lock the SFC.
* If SFC ends up being locked to the engine we want to reset, we have
* to reset it as well (we will unlock it once the reset sequence is
* completed).
*/
- if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
- return 0;
-
- rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
+ rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
ret = __intel_wait_for_register_fw(uncore,
- sfc_forced_lock_ack,
- sfc_forced_lock_ack_bit,
- sfc_forced_lock_ack_bit,
+ sfc_lock.ack_reg,
+ sfc_lock.ack_bit,
+ sfc_lock.ack_bit,
1000, 0, NULL);
- /* Was the SFC released while we were trying to lock it? */
- if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
+ /*
+ * Was the SFC released while we were trying to lock it?
+ *
+ * We should reset both the engine and the SFC if:
+ * - We were locking the SFC to this engine and the lock succeeded
+ * OR
+ * - We were locking the SFC to a different engine (Wa_14010733141)
+ * but the SFC was released before the lock was obtained.
+ *
+ * Otherwise we need only reset the engine by itself and we can
+ * leave the SFC alone.
+ */
+ lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
+ sfc_lock.usage_bit) != 0;
+ if (lock_obtained == lock_to_other)
return 0;
if (ret) {
@@ -408,7 +481,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return ret;
}
- *hw_mask |= sfc_reset_bit;
+ *reset_mask |= sfc_lock.reset_bit;
return 0;
}
@@ -416,28 +489,19 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
- i915_reg_t sfc_forced_lock;
- u32 sfc_forced_lock_bit;
-
- switch (engine->class) {
- case VIDEO_DECODE_CLASS:
- if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
- return;
+ struct sfc_lock_data sfc_lock = {};
- sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
- sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
- break;
-
- case VIDEO_ENHANCEMENT_CLASS:
- sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
- sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
- break;
+ if (engine->class != VIDEO_DECODE_CLASS &&
+ engine->class != VIDEO_ENHANCEMENT_CLASS)
+ return;
- default:
+ if (engine->class == VIDEO_DECODE_CLASS &&
+ (BIT(engine->instance) & vdbox_sfc_access) == 0)
return;
- }
- rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
+ get_sfc_forced_lock_data(engine, &sfc_lock);
+
+ rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
}
static int gen11_reset_engines(struct intel_gt *gt,
@@ -456,23 +520,23 @@ static int gen11_reset_engines(struct intel_gt *gt,
};
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- u32 hw_mask;
+ u32 reset_mask, unlock_mask = 0;
int ret;
if (engine_mask == ALL_ENGINES) {
- hw_mask = GEN11_GRDOM_FULL;
+ reset_mask = GEN11_GRDOM_FULL;
} else {
- hw_mask = 0;
+ reset_mask = 0;
for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
- hw_mask |= hw_engine_mask[engine->id];
- ret = gen11_lock_sfc(engine, &hw_mask);
+ reset_mask |= hw_engine_mask[engine->id];
+ ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
if (ret)
goto sfc_unlock;
}
}
- ret = gen6_hw_domain_reset(gt, hw_mask);
+ ret = gen6_hw_domain_reset(gt, reset_mask);
sfc_unlock:
/*
@@ -480,10 +544,14 @@ sfc_unlock:
	 * gen11_lock_sfc to make sure that we clean up properly if something
	 * went wrong during the lock (e.g. the lock was acquired after timeout
* expiration).
+ *
+ * Due to Wa_14010733141, we may have locked an SFC to an engine that
+	 * wasn't being reset. So instead of calling gen11_unlock_sfc() on
+	 * engine_mask, call it on the mask of engines for which our
+	 * gen11_lock_sfc() calls actually attempted a lock.
*/
- if (engine_mask != ALL_ENGINES)
- for_each_engine_masked(engine, gt, engine_mask, tmp)
- gen11_unlock_sfc(engine);
+ for_each_engine_masked(engine, gt, unlock_mask, tmp)
+ gen11_unlock_sfc(engine);
return ret;
}
@@ -565,7 +633,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
*/
}
- if (INTEL_GEN(gt->i915) >= 11)
+ if (GRAPHICS_VER(gt->i915) >= 11)
ret = gen11_reset_engines(gt, engine_mask, retry);
else
ret = gen6_reset_engines(gt, engine_mask, retry);
@@ -594,17 +662,17 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
if (is_mock_gt(gt))
return mock_reset;
- else if (INTEL_GEN(i915) >= 8)
+ else if (GRAPHICS_VER(i915) >= 8)
return gen8_reset_engines;
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
return gen6_reset_engines;
- else if (INTEL_GEN(i915) >= 5)
+ else if (GRAPHICS_VER(i915) >= 5)
return ilk_do_reset;
else if (IS_G4X(i915))
return g4x_do_reset;
else if (IS_G33(i915) || IS_PINEVIEW(i915))
return g33_do_reset;
- else if (INTEL_GEN(i915) >= 3)
+ else if (GRAPHICS_VER(i915) >= 3)
return i915_do_reset;
else
return NULL;
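
The ladder above is effectively a newest-first table lookup keyed on the graphics version. A standalone sketch of the same selection pattern, using invented stub names rather than the driver's entry points:

#include <stddef.h>

typedef int (*reset_fn)(void);

/* Hypothetical stubs standing in for the per-generation reset paths. */
static int reset_gen8(void) { return 0; }
static int reset_gen6(void) { return 0; }
static int reset_ilk(void)  { return 0; }

/*
 * Newest-first selection: the first version threshold that matches
 * wins, and unknown hardware gets NULL so callers can report that no
 * reset method exists.
 */
static reset_fn pick_reset(int graphics_ver)
{
	if (graphics_ver >= 8)
		return reset_gen8;
	if (graphics_ver >= 6)
		return reset_gen6;
	if (graphics_ver >= 5)
		return reset_ilk;
	return NULL;
}
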
@@ -656,7 +724,7 @@ bool intel_has_reset_engine(const struct intel_gt *gt)
int intel_reset_guc(struct intel_gt *gt)
{
u32 guc_domain =
- INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+ GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
int ret;
GEM_BUG_ON(!HAS_GT_UC(gt->i915));
@@ -1118,7 +1186,6 @@ static int intel_gt_reset_engine(struct intel_engine_cs *engine)
int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
- bool uses_guc = intel_engine_in_guc_submission_mode(engine);
int ret;
ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
@@ -1134,10 +1201,10 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
- if (!uses_guc)
- ret = intel_gt_reset_engine(engine);
- else
+ if (intel_engine_uses_guc(engine))
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
+ else
+ ret = intel_gt_reset_engine(engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index aee0a77c77e0..7c4d5158e03b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -51,11 +51,14 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
if (unlikely(ret))
goto err_unpin;
- if (i915_vma_is_map_and_fenceable(vma))
+ if (i915_vma_is_map_and_fenceable(vma)) {
addr = (void __force *)i915_vma_pin_iomap(vma);
- else
- addr = i915_gem_object_pin_map(vma->obj,
- i915_coherent_map_type(vma->vm->i915));
+ } else {
+ int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+
+ addr = i915_gem_object_pin_map(vma->obj, type);
+ }
+
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err_ring;
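
The branch above picks between an aperture I/O mapping and a CPU pin whose caching mode now depends on the object as well as the device. A hedged restatement of the decision with illustrative types, under the assumption that local-memory objects are always mapped write-combined:

#include <stdbool.h>

enum map_type { MAP_WB, MAP_WC, MAP_IO };

/*
 * Illustrative ring-pin mapping choice: map-and-fenceable VMAs go
 * through the aperture; everything else asks the coherency helper,
 * which in this series considers the object (e.g. whether it lives in
 * local memory) and not just the device.
 */
static enum map_type pick_ring_mapping(bool map_and_fenceable,
				       bool device_coherent, bool obj_in_lmem)
{
	if (map_and_fenceable)
		return MAP_IO;

	if (obj_in_lmem || !device_coherent)
		return MAP_WC;

	return MAP_WB;
}
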
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 9585546556ee..0c423f096e2b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -12,6 +12,7 @@
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_gt_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
@@ -28,7 +29,7 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
* lost interrupts following a reset.
*/
if (engine->class == RENDER_CLASS) {
- if (INTEL_GEN(engine->i915) >= 6)
+ if (GRAPHICS_VER(engine->i915) >= 6)
mask &= ~BIT(0);
else
mask &= ~I915_USER_INTERRUPT;
@@ -42,7 +43,7 @@ static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
u32 addr;
addr = lower_32_bits(phys);
- if (INTEL_GEN(engine->i915) >= 4)
+ if (GRAPHICS_VER(engine->i915) >= 4)
addr |= (phys >> 28) & 0xf0;
intel_uncore_write(engine->uncore, HWS_PGA, addr);
@@ -70,7 +71,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN(engine->i915, 7)) {
+ if (GRAPHICS_VER(engine->i915) == 7) {
switch (engine->id) {
/*
* No more rings exist on Gen7. Default case is only to shut up
@@ -92,7 +93,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN(engine->i915, 6)) {
+ } else if (GRAPHICS_VER(engine->i915) == 6) {
hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
hwsp = RING_HWS_PGA(engine->mmio_base);
@@ -104,7 +105,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
- if (!IS_GEN_RANGE(engine->i915, 6, 7))
+ if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
return;
	/* ring should be idle before issuing a sync flush */
@@ -152,7 +153,7 @@ static void set_pp_dir(struct intel_engine_cs *engine)
ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
- if (INTEL_GEN(engine->i915) >= 7) {
+ if (GRAPHICS_VER(engine->i915) >= 7) {
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -228,7 +229,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
5000, 0, NULL))
goto err;
- if (INTEL_GEN(engine->i915) > 2)
+ if (GRAPHICS_VER(engine->i915) > 2)
ENGINE_WRITE_FW(engine,
RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
@@ -645,9 +646,9 @@ static int mi_set_context(struct i915_request *rq,
u32 *cs;
len = 4;
- if (IS_GEN(i915, 7))
+ if (GRAPHICS_VER(i915) == 7)
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
- else if (IS_GEN(i915, 5))
+ else if (GRAPHICS_VER(i915) == 5)
len += 2;
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
@@ -661,7 +662,7 @@ static int mi_set_context(struct i915_request *rq,
return PTR_ERR(cs);
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (IS_GEN(i915, 7)) {
+ if (GRAPHICS_VER(i915) == 7) {
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
if (num_engines) {
struct intel_engine_cs *signaller;
@@ -677,7 +678,7 @@ static int mi_set_context(struct i915_request *rq,
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
- } else if (IS_GEN(i915, 5)) {
+ } else if (GRAPHICS_VER(i915) == 5) {
/*
* This w/a is only listed for pre-production ilk a/b steppings,
* but is also mentioned for programming the powerctx. To be
@@ -715,7 +716,7 @@ static int mi_set_context(struct i915_request *rq,
*/
*cs++ = MI_NOOP;
- if (IS_GEN(i915, 7)) {
+ if (GRAPHICS_VER(i915) == 7) {
if (num_engines) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
@@ -739,7 +740,7 @@ static int mi_set_context(struct i915_request *rq,
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- } else if (IS_GEN(i915, 5)) {
+ } else if (GRAPHICS_VER(i915) == 5) {
*cs++ = MI_SUSPEND_FLUSH;
}
@@ -989,14 +990,10 @@ static void gen6_bsd_submit_request(struct i915_request *request)
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
-
- engine->park = NULL;
- engine->unpark = NULL;
}
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
- i9xx_set_default_submission(engine);
engine->submit_request = gen6_bsd_submit_request;
}
@@ -1004,7 +1001,7 @@ static void ring_release(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
+ drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
@@ -1021,17 +1018,24 @@ static void ring_release(struct intel_engine_cs *engine)
intel_timeline_put(engine->legacy.timeline);
}
+static void irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ intel_engine_signal_breadcrumbs(engine);
+}
+
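
The new per-engine handler replaces open-coded breadcrumb signalling in the interrupt top half. The shape of the callback pattern, sketched with hypothetical types (the real registration goes through intel_engine_set_irq_handler()):

#include <stdint.h>
#include <stdio.h>

struct engine;
typedef void (*irq_handler_fn)(struct engine *engine, uint16_t iir);

struct engine {
	const char *name;
	irq_handler_fn irq_handler;	/* installed once at engine setup */
};

static void signal_breadcrumbs(struct engine *engine, uint16_t iir)
{
	(void)iir;	/* ring submission ignores the IIR payload */
	printf("%s: signal breadcrumbs\n", engine->name);
}

int main(void)
{
	struct engine rcs0 = { .name = "rcs0" };

	/* setup_irq() equivalent: install the handler before enabling IRQs */
	rcs0.irq_handler = signal_breadcrumbs;

	/* top half: dispatch through the stored callback */
	rcs0.irq_handler(&rcs0, 0x1);
	return 0;
}
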
static void setup_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(i915) >= 6) {
+ intel_engine_set_irq_handler(engine, irq_handler);
+
+ if (GRAPHICS_VER(i915) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
- } else if (INTEL_GEN(i915) >= 5) {
+ } else if (GRAPHICS_VER(i915) >= 5) {
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
- } else if (INTEL_GEN(i915) >= 3) {
+ } else if (GRAPHICS_VER(i915) >= 3) {
engine->irq_enable = gen3_irq_enable;
engine->irq_disable = gen3_irq_disable;
} else {
@@ -1045,7 +1049,7 @@ static void setup_common(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
/* gen8+ are only supported with execlists */
- GEM_BUG_ON(INTEL_GEN(i915) >= 8);
+ GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);
setup_irq(engine);
@@ -1066,14 +1070,14 @@ static void setup_common(struct intel_engine_cs *engine)
* engine->emit_init_breadcrumb().
*/
engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
- if (IS_GEN(i915, 5))
+ if (GRAPHICS_VER(i915) == 5)
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
engine->set_default_submission = i9xx_set_default_submission;
- if (INTEL_GEN(i915) >= 6)
+ if (GRAPHICS_VER(i915) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
- else if (INTEL_GEN(i915) >= 4)
+ else if (GRAPHICS_VER(i915) >= 4)
engine->emit_bb_start = gen4_emit_bb_start;
else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
@@ -1090,16 +1094,16 @@ static void setup_rcs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- if (INTEL_GEN(i915) >= 7) {
+ if (GRAPHICS_VER(i915) >= 7) {
engine->emit_flush = gen7_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
- } else if (IS_GEN(i915, 6)) {
+ } else if (GRAPHICS_VER(i915) == 6) {
engine->emit_flush = gen6_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
- } else if (IS_GEN(i915, 5)) {
+ } else if (GRAPHICS_VER(i915) == 5) {
engine->emit_flush = gen4_emit_flush_rcs;
} else {
- if (INTEL_GEN(i915) < 4)
+ if (GRAPHICS_VER(i915) < 4)
engine->emit_flush = gen2_emit_flush;
else
engine->emit_flush = gen4_emit_flush_rcs;
@@ -1114,20 +1118,20 @@ static void setup_vcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(i915) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN(i915, 6))
+ if (GRAPHICS_VER(i915) == 6)
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_emit_flush_vcs;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- if (IS_GEN(i915, 6))
+ if (GRAPHICS_VER(i915) == 6)
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
} else {
engine->emit_flush = gen4_emit_flush_vcs;
- if (IS_GEN(i915, 5))
+ if (GRAPHICS_VER(i915) == 5)
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
@@ -1141,7 +1145,7 @@ static void setup_bcs(struct intel_engine_cs *engine)
engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- if (IS_GEN(i915, 6))
+ if (GRAPHICS_VER(i915) == 6)
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
@@ -1151,7 +1155,7 @@ static void setup_vecs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- GEM_BUG_ON(INTEL_GEN(i915) < 7);
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 7);
engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
@@ -1199,7 +1203,7 @@ static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
struct i915_vma *vma;
int size, err;
- if (!IS_GEN(engine->i915, 7) || engine->class != RENDER_CLASS)
+ if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
return 0;
err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 405d814e9040..06e9a8ed4e03 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -196,7 +196,7 @@ static void rps_reset_interrupts(struct intel_rps *rps)
struct intel_gt *gt = rps_to_gt(rps);
spin_lock_irq(&gt->irq_lock);
- if (INTEL_GEN(gt->i915) >= 11)
+ if (GRAPHICS_VER(gt->i915) >= 11)
gen11_rps_reset_interrupts(rps);
else
gen6_rps_reset_interrupts(rps);
@@ -630,7 +630,7 @@ static u32 rps_limits(struct intel_rps *rps, u8 val)
	 * frequency; if the down threshold expires in that window we will not
* receive a down interrupt.
*/
- if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
limits = rps->max_freq_softlimit << 23;
if (val <= rps->min_freq_softlimit)
limits |= rps->min_freq_softlimit << 14;
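
The interrupt-limit packing above is just two frequency fields at fixed offsets. A standalone sketch of the gen9-style encoding, with the field positions taken from the code above and the helper name invented:

#include <stdint.h>

/*
 * gen9+ RP interrupt limits: the max softlimit lives at bit 23 and,
 * when we are already at the floor, the min softlimit at bit 14 so
 * that no further down-interrupts are requested.
 */
static uint32_t model_rps_limits(uint8_t val, uint8_t min_soft,
				 uint8_t max_soft)
{
	uint32_t limits = (uint32_t)max_soft << 23;

	if (val <= min_soft)
		limits |= (uint32_t)min_soft << 14;

	return limits;
}
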
@@ -697,7 +697,7 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
set(uncore, GEN6_RP_CONTROL,
- (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
@@ -771,7 +771,7 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 swreq;
- if (INTEL_GEN(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9)
swreq = GEN9_FREQUENCY(val);
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
swreq = HSW_FREQUENCY(val);
@@ -812,14 +812,14 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_rps_set(rps, val);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
err = gen6_rps_set(rps, val);
else
err = gen5_rps_set(rps, val);
if (err)
return err;
- if (update && INTEL_GEN(i915) >= 6)
+ if (update && GRAPHICS_VER(i915) >= 6)
gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;
@@ -853,7 +853,7 @@ void intel_rps_unpark(struct intel_rps *rps)
if (intel_rps_uses_timer(rps))
rps_start_timer(rps);
- if (IS_GEN(rps_to_i915(rps), 5))
+ if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
gen5_rps_update(rps);
}
@@ -999,7 +999,7 @@ static void gen6_rps_init(struct intel_rps *rps)
rps->efficient_freq = rps->rp1_freq;
if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
- IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
u32 ddcc_status = 0;
if (sandybridge_pcode_read(i915,
@@ -1012,7 +1012,7 @@ static void gen6_rps_init(struct intel_rps *rps)
rps->max_freq);
}
- if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
		/* Store the frequency values in 16.66 MHz units, which is
* the natural hardware unit for SKL
*/
@@ -1048,7 +1048,7 @@ static bool gen9_rps_enable(struct intel_rps *rps)
struct intel_uncore *uncore = gt->uncore;
/* Program defaults and thresholds for RPS */
- if (IS_GEN(gt->i915, 9))
+ if (GRAPHICS_VER(gt->i915) == 9)
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(rps->rp1_freq));
@@ -1365,16 +1365,16 @@ void intel_rps_enable(struct intel_rps *rps)
enabled = chv_rps_enable(rps);
else if (IS_VALLEYVIEW(i915))
enabled = vlv_rps_enable(rps);
- else if (INTEL_GEN(i915) >= 9)
+ else if (GRAPHICS_VER(i915) >= 9)
enabled = gen9_rps_enable(rps);
- else if (INTEL_GEN(i915) >= 8)
+ else if (GRAPHICS_VER(i915) >= 8)
enabled = gen8_rps_enable(rps);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
enabled = gen6_rps_enable(rps);
else if (IS_IRONLAKE_M(i915))
enabled = gen5_rps_enable(rps);
else
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(GRAPHICS_VER(i915));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
if (!enabled)
return;
@@ -1393,7 +1393,7 @@ void intel_rps_enable(struct intel_rps *rps)
if (has_busy_stats(rps))
intel_rps_set_timer(rps);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
intel_rps_set_interrupts(rps);
else
/* Ironlake currently uses intel_ips.ko */ {}
@@ -1414,7 +1414,7 @@ void intel_rps_disable(struct intel_rps *rps)
intel_rps_clear_interrupts(rps);
intel_rps_clear_timer(rps);
- if (INTEL_GEN(i915) >= 6)
+ if (GRAPHICS_VER(i915) >= 6)
gen6_rps_disable(rps);
else if (IS_IRONLAKE_M(i915))
gen5_rps_disable(rps);
@@ -1453,14 +1453,14 @@ int intel_gpu_freq(struct intel_rps *rps, int val)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- if (INTEL_GEN(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9)
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER);
else if (IS_CHERRYVIEW(i915))
return chv_gpu_freq(rps, val);
else if (IS_VALLEYVIEW(i915))
return byt_gpu_freq(rps, val);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
return val * GT_FREQUENCY_MULTIPLIER;
else
return val;
@@ -1470,14 +1470,14 @@ int intel_freq_opcode(struct intel_rps *rps, int val)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- if (INTEL_GEN(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9)
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER);
else if (IS_CHERRYVIEW(i915))
return chv_freq_opcode(rps, val);
else if (IS_VALLEYVIEW(i915))
return byt_freq_opcode(rps, val);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
else
return val;
@@ -1770,11 +1770,11 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
spin_unlock(&gt->irq_lock);
}
- if (INTEL_GEN(gt->i915) >= 8)
+ if (GRAPHICS_VER(gt->i915) >= 8)
return;
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine[VECS0]);
+ intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -1833,7 +1833,7 @@ void intel_rps_init(struct intel_rps *rps)
chv_rps_init(rps);
else if (IS_VALLEYVIEW(i915))
vlv_rps_init(rps);
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
gen6_rps_init(rps);
else if (IS_IRONLAKE_M(i915))
gen5_rps_init(rps);
@@ -1843,7 +1843,7 @@ void intel_rps_init(struct intel_rps *rps)
rps->min_freq_softlimit = rps->min_freq;
/* After setting max-softlimit, find the overclock max freq */
- if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
+ if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
u32 params = 0;
sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
@@ -1872,16 +1872,16 @@ void intel_rps_init(struct intel_rps *rps)
*
* TODO: verify if this can be reproduced on VLV,CHV.
*/
- if (INTEL_GEN(i915) <= 7)
+ if (GRAPHICS_VER(i915) <= 7)
rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
- if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
+ if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
void intel_rps_sanitize(struct intel_rps *rps)
{
- if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
rps_disable_interrupts(rps);
}
@@ -1892,11 +1892,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
cagf = (rpstat >> 8) & 0xff;
- else if (INTEL_GEN(i915) >= 9)
+ else if (GRAPHICS_VER(i915) >= 9)
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else if (INTEL_GEN(i915) >= 6)
+ else if (GRAPHICS_VER(i915) >= 6)
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
else
cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
@@ -1915,7 +1915,7 @@ static u32 read_cagf(struct intel_rps *rps)
vlv_punit_get(i915);
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
- } else if (INTEL_GEN(i915) >= 6) {
+ } else if (GRAPHICS_VER(i915) >= 6) {
freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
} else {
freq = intel_uncore_read(uncore, MEMSTAT_ILK);
@@ -1968,7 +1968,7 @@ void intel_rps_driver_register(struct intel_rps *rps)
* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values.
*/
- if (IS_GEN(gt->i915, 5)) {
+ if (GRAPHICS_VER(gt->i915) == 5) {
GEM_BUG_ON(ips_mchdev);
rcu_assign_pointer(ips_mchdev, gt->i915);
ips_ping_for_i915_load();
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 0d9f74aec8fe..367fd44b81c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -590,13 +590,13 @@ void intel_sseu_info_init(struct intel_gt *gt)
cherryview_sseu_info_init(gt);
else if (IS_BROADWELL(i915))
bdw_sseu_info_init(gt);
- else if (IS_GEN(i915, 9))
+ else if (GRAPHICS_VER(i915) == 9)
gen9_sseu_info_init(gt);
- else if (IS_GEN(i915, 10))
+ else if (GRAPHICS_VER(i915) == 10)
gen10_sseu_info_init(gt);
- else if (IS_GEN(i915, 11))
+ else if (GRAPHICS_VER(i915) == 11)
gen11_sseu_info_init(gt);
- else if (INTEL_GEN(i915) >= 12)
+ else if (GRAPHICS_VER(i915) >= 12)
gen12_sseu_info_init(gt);
}
@@ -613,7 +613,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
- if (INTEL_GEN(i915) < 9)
+ if (GRAPHICS_VER(i915) < 9)
return 0;
/*
@@ -651,7 +651,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
* subslices are enabled, or a count between one and four on the first
* slice.
*/
- if (IS_GEN(i915, 11) &&
+ if (GRAPHICS_VER(i915) == 11 &&
slices == 1 &&
subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
GEM_BUG_ON(subslices & 1);
@@ -669,7 +669,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
if (sseu->has_slice_pg) {
u32 mask, val = slices;
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
mask = GEN11_RPCS_S_CNT_MASK;
val <<= GEN11_RPCS_S_CNT_SHIFT;
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
index 51780282d872..714fe8495775 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -248,7 +248,7 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
struct sseu_dev_info sseu;
intel_wakeref_t wakeref;
- if (INTEL_GEN(i915) < 8)
+ if (GRAPHICS_VER(i915) < 8)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
@@ -265,9 +265,9 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
cherryview_sseu_device_status(gt, &sseu);
else if (IS_BROADWELL(i915))
bdw_sseu_device_status(gt, &sseu);
- else if (IS_GEN(i915, 9))
+ else if (GRAPHICS_VER(i915) == 9)
gen9_sseu_device_status(gt, &sseu);
- else if (INTEL_GEN(i915) >= 10)
+ else if (GRAPHICS_VER(i915) >= 10)
gen10_sseu_device_status(gt, &sseu);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index f19cf6d2fa85..c4a126c8caef 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -32,7 +32,6 @@ static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
return vma;
}
-__i915_active_call
static void __timeline_retire(struct i915_active *active)
{
struct intel_timeline *tl =
@@ -104,7 +103,8 @@ static int intel_timeline_init(struct intel_timeline *timeline,
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
- i915_active_init(&timeline->active, __timeline_active, __timeline_retire);
+ i915_active_init(&timeline->active, __timeline_active,
+ __timeline_retire, 0);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 2c6f7217469f..b62d1e31a645 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -607,9 +607,38 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
+/*
+ * These settings aren't actually workarounds, but general tuning settings that
+ * need to be programmed on several platforms.
+ */
+static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ /*
+ * Although some platforms refer to it as Wa_1604555607, we need to
+ * program it even on those that don't explicitly list that
+ * workaround.
+ *
+ * Note that the programming of this register is further modified
+ * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
+ * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
+ * value when read. The default value for this register is zero for all
+ * fields and there are no bit masks. So instead of doing a RMW we
+ * should just write TDS timer value. For the same reason read
+ * verification is ignored.
+ */
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128,
+ 0);
+}
+
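
The zero final argument to wa_add() above is the read-verification mask, which must stay zero because FF_MODE2 reads back wrong under Wa_1608008084. A hedged model of what "skip verification" means for a workaround entry (structure and helper are illustrative, not the driver's):

#include <stdint.h>
#include <stdbool.h>

/* Illustrative workaround entry: clr/set plus a read-verification mask. */
struct wa_entry {
	uint32_t reg;
	uint32_t clr;
	uint32_t set;
	uint32_t read_mask;	/* 0 => never verify this register */
};

static bool wa_verify(const struct wa_entry *wa, uint32_t observed)
{
	/*
	 * With read_mask == 0 every readback trivially passes, which is
	 * the intent for FF_MODE2: the value written is trusted, the
	 * value read is known to be wrong.
	 */
	return (observed & wa->read_mask) == (wa->set & wa->read_mask);
}
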
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ gen12_ctx_gt_tuning_init(engine, wal);
+
/*
* Wa_1409142259:tgl
* Wa_1409347922:tgl
@@ -628,27 +657,17 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
-}
-
-static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
- struct i915_wa_list *wal)
-{
- gen12_ctx_workarounds_init(engine, wal);
/*
- * Wa_1604555607:tgl,rkl
+ * Wa_16011163337
*
- * Note that the implementation of this workaround is further modified
- * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
- * FF_MODE2 register will return the wrong value when read. The default
- * value for this register is zero for all fields and there are no bit
- * masks. So instead of doing a RMW we should just write the GS Timer
- * and TDS timer values for Wa_1604555607 and Wa_16011163337.
+ * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
+ * to Wa_1608008084.
*/
wa_add(wal,
FF_MODE2,
- FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK,
- FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128,
+ FF_MODE2_GS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224,
0);
}
@@ -664,16 +683,6 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_22010493298 */
wa_masked_en(wal, HIZ_CHICKEN,
DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
-
- /*
- * Wa_16011163337
- *
- * Like in tgl_ctx_workarounds_init(), read verification is ignored due
- * to Wa_1608008084.
- */
- wa_add(wal,
- FF_MODE2,
- FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0);
}
static void
@@ -690,12 +699,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
- else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) ||
- IS_TIGERLAKE(i915))
- tgl_ctx_workarounds_init(engine, wal);
- else if (IS_GEN(i915, 12))
+ else if (GRAPHICS_VER(i915) == 12)
gen12_ctx_workarounds_init(engine, wal);
- else if (IS_GEN(i915, 11))
+ else if (GRAPHICS_VER(i915) == 11)
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine, wal);
@@ -713,14 +719,14 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
bdw_ctx_workarounds_init(engine, wal);
- else if (IS_GEN(i915, 7))
+ else if (GRAPHICS_VER(i915) == 7)
gen7_ctx_workarounds_init(engine, wal);
- else if (IS_GEN(i915, 6))
+ else if (GRAPHICS_VER(i915) == 6)
gen6_ctx_workarounds_init(engine, wal);
- else if (INTEL_GEN(i915) < 8)
+ else if (GRAPHICS_VER(i915) < 8)
;
else
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(GRAPHICS_VER(i915));
wa_init_finish(wal);
}
@@ -944,7 +950,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
unsigned int slice, subslice;
u32 l3_en, mcr, mcr_mask;
- GEM_BUG_ON(INTEL_GEN(i915) < 10);
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 10);
/*
* WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
@@ -974,7 +980,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* of every MMIO read.
*/
- if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
+ if (GRAPHICS_VER(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
u32 l3_fuse =
intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;
@@ -996,7 +1002,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
subslice--;
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
} else {
@@ -1078,11 +1084,37 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
+/*
+ * Though there are per-engine instances of these registers,
+ * they retain their value through engine resets and should
+ * only be provided on the GT workaround list rather than
+ * the engine-specific workaround list.
+ */
+static void
+wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ struct intel_gt *gt = &i915->gt;
+ int id;
+
+ for_each_engine(engine, gt, id) {
+ if (engine->class != VIDEO_DECODE_CLASS ||
+ (engine->instance % 2))
+ continue;
+
+ wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
+ IECPUNIT_CLKGATE_DIS);
+ }
+}
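
Wa_14011060649 touches a clock-gating register shared by each pair of video decode engines, which is why the loop above skips odd instances. A compact model of that pairing, with a made-up engine table:

#include <stdio.h>

enum engine_class { RENDER, VIDEO_DECODE, VIDEO_ENHANCE };

struct eng { enum engine_class class_; int instance; };

int main(void)
{
	/* Hypothetical engine set: rcs0 plus vcs0..vcs3. */
	const struct eng engines[] = {
		{ RENDER, 0 },
		{ VIDEO_DECODE, 0 }, { VIDEO_DECODE, 1 },
		{ VIDEO_DECODE, 2 }, { VIDEO_DECODE, 3 },
	};

	for (unsigned int i = 0; i < sizeof(engines) / sizeof(engines[0]); i++) {
		const struct eng *e = &engines[i];

		/* One register per vdbox pair: program it from the even half. */
		if (e->class_ != VIDEO_DECODE || (e->instance % 2))
			continue;

		printf("apply Wa_14011060649 via vcs%d\n", e->instance);
	}
	return 0;
}
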
+
static void
gen12_gt_workarounds_init(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
wa_init_mcr(i915, wal);
+
+ /* Wa_14011060649:tgl,rkl,dg1,adls */
+ wa_14011060649(i915, wal);
}
static void
@@ -1139,9 +1171,9 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
dg1_gt_workarounds_init(i915, wal);
else if (IS_TIGERLAKE(i915))
tgl_gt_workarounds_init(i915, wal);
- else if (IS_GEN(i915, 12))
+ else if (GRAPHICS_VER(i915) == 12)
gen12_gt_workarounds_init(i915, wal);
- else if (IS_GEN(i915, 11))
+ else if (GRAPHICS_VER(i915) == 11)
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
@@ -1161,18 +1193,18 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
vlv_gt_workarounds_init(i915, wal);
else if (IS_IVYBRIDGE(i915))
ivb_gt_workarounds_init(i915, wal);
- else if (IS_GEN(i915, 6))
+ else if (GRAPHICS_VER(i915) == 6)
snb_gt_workarounds_init(i915, wal);
- else if (IS_GEN(i915, 5))
+ else if (GRAPHICS_VER(i915) == 5)
ilk_gt_workarounds_init(i915, wal);
else if (IS_G4X(i915))
g4x_gt_workarounds_init(i915, wal);
- else if (IS_GEN(i915, 4))
+ else if (GRAPHICS_VER(i915) == 4)
gen4_gt_workarounds_init(i915, wal);
- else if (INTEL_GEN(i915) <= 8)
+ else if (GRAPHICS_VER(i915) <= 8)
;
else
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(GRAPHICS_VER(i915));
}
void intel_gt_init_workarounds(struct drm_i915_private *i915)
@@ -1526,9 +1558,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
if (IS_DG1(i915))
dg1_whitelist_build(engine);
- else if (IS_GEN(i915, 12))
+ else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
- else if (IS_GEN(i915, 11))
+ else if (GRAPHICS_VER(i915) == 11)
icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(engine);
@@ -1544,10 +1576,10 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
bxt_whitelist_build(engine);
else if (IS_SKYLAKE(i915))
skl_whitelist_build(engine);
- else if (INTEL_GEN(i915) <= 8)
+ else if (GRAPHICS_VER(i915) <= 8)
;
else
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(GRAPHICS_VER(i915));
wa_init_finish(w);
}
@@ -1663,7 +1695,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
ENABLE_SMALLPL);
}
- if (IS_GEN(i915, 11)) {
+ if (GRAPHICS_VER(i915) == 11) {
		/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
_3D_CHICKEN3,
@@ -1755,14 +1787,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
- /* Wa_22010271021:ehl */
- if (IS_JSL_EHL(i915))
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
+ /* Wa_22010271021 */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
- if (IS_GEN_RANGE(i915, 9, 12)) {
+ if (IS_GRAPHICS_VER(i915, 9, 12)) {
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
@@ -1786,7 +1817,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
}
- if (IS_GEN(i915, 9)) {
+ if (GRAPHICS_VER(i915) == 9) {
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
wa_masked_en(wal,
GEN9_CSFE_CHICKEN1_RCS,
@@ -1828,9 +1859,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
CACHE_MODE_0_GEN7,
/* enable HiZ Raw Stall Optimization */
HIZ_RAW_STALL_OPT_DISABLE);
-
- /* WaDisable4x2SubspanOptimization:hsw */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
}
if (IS_VALLEYVIEW(i915)) {
@@ -1893,7 +1921,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
}
- if (IS_GEN(i915, 7)) {
+ if (GRAPHICS_VER(i915) == 7) {
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
wa_masked_en(wal,
GFX_MODE_GEN7,
@@ -1925,7 +1953,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_WIZ_HASHING_16x4);
}
- if (IS_GEN_RANGE(i915, 6, 7))
+ if (IS_GRAPHICS_VER(i915, 6, 7))
/*
* We need to disable the AsyncFlip performance optimisations in
* order to use MI_WAIT_FOR_EVENT within the CS. It should
@@ -1937,7 +1965,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
MI_MODE,
ASYNC_FLIP_PERF_DISABLE);
- if (IS_GEN(i915, 6)) {
+ if (GRAPHICS_VER(i915) == 6) {
/*
* Required for the hardware to program scanline values for
* waiting
@@ -1991,14 +2019,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
CM0_STC_EVICT_DISABLE_LRA_SNB);
}
- if (IS_GEN_RANGE(i915, 4, 6))
+ if (IS_GRAPHICS_VER(i915, 4, 6))
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
wa_add(wal, MI_MODE,
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
- if (IS_GEN(i915, 4))
+ if (GRAPHICS_VER(i915) == 4)
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
* image. For as it is loaded, it is executed and the stored
@@ -2030,7 +2058,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
+ if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
return;
if (engine->class == RENDER_CLASS)
@@ -2043,7 +2071,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
- if (INTEL_GEN(engine->i915) < 4)
+ if (GRAPHICS_VER(engine->i915) < 4)
return;
wa_init_start(wal, "engine", engine->name);
@@ -2084,9 +2112,9 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
const struct mcr_range *mcr_ranges;
int i;
- if (INTEL_GEN(i915) >= 12)
+ if (GRAPHICS_VER(i915) >= 12)
mcr_ranges = mcr_ranges_gen12;
- else if (INTEL_GEN(i915) >= 8)
+ else if (GRAPHICS_VER(i915) >= 8)
mcr_ranges = mcr_ranges_gen8;
else
return false;
@@ -2115,7 +2143,7 @@ wa_list_srm(struct i915_request *rq,
u32 srm, *cs;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(i915) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
srm++;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index e1ba03b93ffa..32589c6625e1 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -55,7 +55,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
kfree(ring);
return NULL;
}
- i915_active_init(&ring->vma->active, NULL, NULL);
+ i915_active_init(&ring->vma->active, NULL, NULL, 0);
__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
ring->vma->node.size = sz;
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index b9bdd1d23243..26685b927169 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -88,7 +88,8 @@ static int __live_context_size(struct intel_engine_cs *engine)
goto err;
vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index b32814a1f20b..64abf5feabfa 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -52,7 +52,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
- if (INTEL_GEN(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
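
The cmd++ above flips MI_STORE_REGISTER_MEM into its gen8+ form, which takes a 64-bit GGTT address and therefore one extra dword in the command stream. A sketch of the emission difference; the opcode constants here are placeholders, not the real encodings:

#include <stdint.h>

/*
 * Emit a store-register-to-memory command. On gen8+ the opcode's
 * length field is one larger (cmd++) because the address is 64-bit.
 */
static uint32_t *emit_srm(uint32_t *cs, int graphics_ver,
			  uint32_t reg, uint64_t gtt_addr)
{
	uint32_t cmd = 0x24 << 23 | 1 << 22;	/* illustrative SRM | USE_GGTT */

	if (graphics_ver >= 8)
		cmd++;				/* one more dword follows */

	*cs++ = cmd;
	*cs++ = reg;
	*cs++ = (uint32_t)gtt_addr;		/* low dword */
	if (graphics_ver >= 8)
		*cs++ = (uint32_t)(gtt_addr >> 32); /* high dword, gen8+ only */

	return cs;
}
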
@@ -125,7 +125,7 @@ static int perf_mi_bb_start(void *arg)
enum intel_engine_id id;
int err = 0;
- if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
return 0;
perf_begin(gt);
@@ -249,7 +249,7 @@ static int perf_mi_noop(void *arg)
enum intel_engine_id id;
int err = 0;
- if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
return 0;
perf_begin(gt);
@@ -376,34 +376,34 @@ static int intel_mmio_bases_check(void *arg)
u8 prev = U8_MAX;
for (j = 0; j < MAX_MMIO_BASES; j++) {
- u8 gen = info->mmio_bases[j].gen;
+ u8 ver = info->mmio_bases[j].graphics_ver;
u32 base = info->mmio_bases[j].base;
- if (gen >= prev) {
- pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
+ if (ver >= prev) {
+ pr_err("%s(%s, class:%d, instance:%d): mmio base for graphics ver %u is before the one for ver %u\n",
__func__,
intel_engine_class_repr(info->class),
info->class, info->instance,
- prev, gen);
+ prev, ver);
return -EINVAL;
}
- if (gen == 0)
+ if (ver == 0)
break;
if (!base) {
- pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
+ pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for graphics ver %u at entry %u\n",
__func__,
intel_engine_class_repr(info->class),
info->class, info->instance,
- base, gen, j);
+ base, ver, j);
return -EINVAL;
}
- prev = gen;
+ prev = ver;
}
- pr_debug("%s: min gen supported for %s%d is %d\n",
+ pr_debug("%s: min graphics version supported for %s%d is %u\n",
__func__,
intel_engine_class_repr(info->class),
info->instance,
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index b2c369317bf1..4896e4ccad50 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -77,7 +77,7 @@ static struct pulse *pulse_create(void)
return p;
kref_init(&p->kref);
- i915_active_init(&p->active, pulse_active, pulse_retire);
+ i915_active_init(&p->active, pulse_active, pulse_retire, 0);
return p;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 2c898622bdfb..72cca3f0da21 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -198,7 +198,7 @@ static int live_engine_timestamps(void *arg)
* the same CS clock.
*/
- if (INTEL_GEN(gt->i915) < 8)
+ if (GRAPHICS_VER(gt->i915) < 8)
return 0;
for_each_engine(engine, gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 1081cd36a2bd..1c8108d30b85 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -551,6 +551,32 @@ static int live_pin_rewind(void *arg)
return err;
}
+static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
+{
+ tasklet_disable(&engine->execlists.tasklet);
+ local_bh_disable();
+
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &engine->gt->reset.flags)) {
+ local_bh_enable();
+ tasklet_enable(&engine->execlists.tasklet);
+
+ intel_gt_set_wedged(engine->gt);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
+{
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &engine->gt->reset.flags);
+
+ local_bh_enable();
+ tasklet_enable(&engine->execlists.tasklet);
+}
+
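
The helper pair above factors out the take-ownership/release dance repeated by live_hold_reset() and reset_virtual_engine(): quiesce the tasklet, claim the per-engine reset bit, do the reset work, then release in reverse order. A minimal standalone model of that ordering, with invented flag handling in place of the kernel's bit-wait machinery:

#include <stdint.h>
#include <stdio.h>

static int lock_reset(uint32_t *flags, int engine_bit)
{
	if (*flags & (1u << engine_bit))
		return -1;		/* someone else owns the reset */
	*flags |= 1u << engine_bit;	/* disable tasklet + take the bit */
	return 0;
}

static void unlock_reset(uint32_t *flags, int engine_bit)
{
	*flags &= ~(1u << engine_bit);	/* wake waiters + re-enable tasklet */
}

int main(void)
{
	uint32_t reset_flags = 0;

	if (lock_reset(&reset_flags, 0))
		return 1;

	printf("tasklet quiesced: run reset work here\n");

	unlock_reset(&reset_flags, 0);
	return 0;
}
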
static int live_hold_reset(void *arg)
{
struct intel_gt *gt = arg;
@@ -598,15 +624,9 @@ static int live_hold_reset(void *arg)
/* We have our request executing, now remove it and reset */
- local_bh_disable();
- if (test_and_set_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags)) {
- local_bh_enable();
- intel_gt_set_wedged(gt);
- err = -EBUSY;
+ err = engine_lock_reset_tasklet(engine);
+ if (err)
goto out;
- }
- tasklet_disable(&engine->execlists.tasklet);
engine->execlists.tasklet.callback(&engine->execlists.tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
@@ -618,10 +638,7 @@ static int live_hold_reset(void *arg)
__intel_engine_reset_bh(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags);
- local_bh_enable();
+ engine_unlock_reset_tasklet(engine);
/* Check that we do not resubmit the held request */
if (!i915_request_wait(rq, 0, HZ / 5)) {
@@ -3269,7 +3286,7 @@ static int live_preempt_user(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+ if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS)
continue; /* we need per-context GPR */
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
@@ -4293,7 +4310,7 @@ static int live_virtual_preserved(void *arg)
return 0;
/* As we use CS_GPR we cannot run before they existed on all engines. */
- if (INTEL_GEN(gt->i915) < 9)
+ if (GRAPHICS_VER(gt->i915) < 9)
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -4585,15 +4602,9 @@ static int reset_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(engine == ve->engine);
/* Take ownership of the reset and tasklet */
- local_bh_disable();
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags)) {
- local_bh_enable();
- intel_gt_set_wedged(gt);
- err = -EBUSY;
+ err = engine_lock_reset_tasklet(engine);
+ if (err)
goto out_heartbeat;
- }
- tasklet_disable(&engine->execlists.tasklet);
engine->execlists.tasklet.callback(&engine->execlists.tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
@@ -4612,9 +4623,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(rq->fence.error != -EIO);
/* Release our grasp on the engine, letting CS flow again */
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
- local_bh_enable();
+ engine_unlock_reset_tasklet(engine);
/* Check that we do not resubmit the held request */
i915_request_get(rq);
@@ -4716,7 +4725,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_reset),
};
- if (!HAS_EXECLISTS(i915))
+ if (i915->gt.submission_method != INTEL_SUBMISSION_ELSP)
return 0;
if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index c0845bf72dd3..b9441217ca3d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -74,10 +74,10 @@ static int live_gt_clocks(void *arg)
return 0;
}
- if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */
+ if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- if (IS_GEN(gt->i915, 5))
+ if (GRAPHICS_VER(gt->i915) == 5)
/*
* XXX CS_TIMESTAMP low dword is dysfunctional?
*
@@ -86,7 +86,7 @@ static int live_gt_clocks(void *arg)
*/
return 0;
- if (IS_GEN(gt->i915, 4))
+ if (GRAPHICS_VER(gt->i915) == 4)
/*
* XXX CS_TIMESTAMP appears gibberish
*
@@ -105,7 +105,7 @@ static int live_gt_clocks(void *arg)
u64 time;
u64 dt;
- if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0)
+ if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
continue;
measure_clocks(engine, &cycles, &dt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 746985971c3a..853246fad05f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -69,7 +69,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map_unlocked(h->obj,
- i915_coherent_map_type(gt->i915));
+ i915_coherent_map_type(gt->i915, h->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -130,7 +130,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
}
- vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
@@ -180,7 +180,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
goto cancel_rq;
batch = h->batch;
- if (INTEL_GEN(gt->i915) >= 8) {
+ if (GRAPHICS_VER(gt->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
@@ -194,7 +194,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
- } else if (INTEL_GEN(gt->i915) >= 6) {
+ } else if (GRAPHICS_VER(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -207,7 +207,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
- } else if (INTEL_GEN(gt->i915) >= 4) {
+ } else if (GRAPHICS_VER(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -243,7 +243,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
}
flags = 0;
- if (INTEL_GEN(gt->i915) <= 5)
+ if (GRAPHICS_VER(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index 94006f117bbd..459b775f163a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -44,7 +44,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
if (found != ia_freq) {
pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
- intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
found, ia_freq);
err = -EINVAL;
break;
@@ -54,7 +54,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
if (found != ring_freq) {
pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
- intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
found, ring_freq);
err = -EINVAL;
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 85e7df6a5123..3119016d9910 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -584,7 +584,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
int err;
int n;
- if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
+ if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS)
return 0; /* GPR only on rcs0 for gen8 */
err = gpr_make_dirty(engine->kernel_context);
@@ -1221,7 +1221,9 @@ static int compare_isolation(struct intel_engine_cs *engine,
}
lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj,
+ false));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_B1;
@@ -1387,10 +1389,10 @@ err_A:
static bool skip_isolation(const struct intel_engine_cs *engine)
{
- if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
+ if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9)
return true;
- if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
+ if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11)
return true;
return false;
@@ -1549,7 +1551,7 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
/* We use the already reserved extra page in context state */
if (!a->wa_bb_page) {
GEM_BUG_ON(b->wa_bb_page);
- GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
+ GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12);
goto unpin_b;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index e55a887d11e2..b9bb0e6e97f7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -183,7 +183,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
- return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
+ return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}
static int check_l3cc_table(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index f097e420ac45..8c70b7e12074 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -34,6 +34,7 @@ int live_rc6_manual(void *arg)
struct intel_rc6 *rc6 = &gt->rc6;
u64 rc0_power, rc6_power;
intel_wakeref_t wakeref;
+ bool has_power;
ktime_t dt;
u64 res[2];
int err = 0;
@@ -50,6 +51,7 @@ int live_rc6_manual(void *arg)
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
return 0;
+ has_power = librapl_supported(gt->i915);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
/* Force RC6 off for starters */
@@ -71,11 +73,14 @@ int live_rc6_manual(void *arg)
goto out_unlock;
}
- rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt));
- if (!rc0_power) {
- pr_err("No power measured while in RC0\n");
- err = -EINVAL;
- goto out_unlock;
+ if (has_power) {
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
+ ktime_to_ns(dt));
+ if (!rc0_power) {
+ pr_err("No power measured while in RC0\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
}
/* Manually enter RC6 */
@@ -97,13 +102,16 @@ int live_rc6_manual(void *arg)
err = -EINVAL;
}
- rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt));
- pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
- rc0_power, rc6_power);
- if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6!\n");
- err = -EINVAL;
- goto out_unlock;
+ if (has_power) {
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_power,
+ ktime_to_ns(dt));
+ pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ rc0_power, rc6_power);
+ if (2 * rc6_power > rc0_power) {
+ pr_err("GPU leaked energy while in RC6!\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
}
/* Restore what should have been the original state! */
@@ -132,7 +140,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
}
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
- if (INTEL_GEN(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
@@ -185,7 +193,7 @@ int live_rc6_ctx_wa(void *arg)
int err = 0;
/* A read of CTX_INFO upsets rc6. Poke the bear! */
- if (INTEL_GEN(gt->i915) < 8)
+ if (GRAPHICS_VER(gt->i915) < 8)
return 0;
engines = randomised_engines(gt, &prng, &count);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 99609271c3a7..041954408d0f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -41,10 +41,10 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
return ERR_CAST(cs);
}
- if (INTEL_GEN(engine->i915) >= 6) {
+ if (GRAPHICS_VER(engine->i915) >= 6) {
*cs++ = MI_STORE_DWORD_IMM_GEN4;
*cs++ = 0;
- } else if (INTEL_GEN(engine->i915) >= 4) {
+ } else if (GRAPHICS_VER(engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
} else {
@@ -266,7 +266,7 @@ static int live_ctx_switch_wa(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- if (IS_GEN_RANGE(gt->i915, 4, 5))
+ if (IS_GRAPHICS_VER(gt->i915, 4, 5))
continue; /* MI_STORE_DWORD is privileged! */
saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
@@ -291,7 +291,7 @@ int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_ctx_switch_wa),
};
- if (HAS_EXECLISTS(i915))
+ if (i915->gt.submission_method > INTEL_SUBMISSION_RING)
return 0;
return intel_gt_live_subtests(tests, &i915->gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 967641fee42a..7ee2513e15f9 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -204,7 +204,7 @@ static void show_pstate_limits(struct intel_rps *rps)
i915_mmio_reg_offset(BXT_RP_STATE_CAP),
intel_uncore_read(rps_to_uncore(rps),
BXT_RP_STATE_CAP));
- } else if (IS_GEN(i915, 9)) {
+ } else if (GRAPHICS_VER(i915) == 9) {
pr_info("P_STATE_LIMITS[%x]: 0x%08x\n",
i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS),
intel_uncore_read(rps_to_uncore(rps),
@@ -222,7 +222,7 @@ int live_rps_clock_interval(void *arg)
struct igt_spinner spin;
int err = 0;
- if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
+ if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
if (igt_spinner_init(&spin, gt))
@@ -506,7 +506,7 @@ static void show_pcu_config(struct intel_rps *rps)
min_gpu_freq = rps->min_freq;
max_gpu_freq = rps->max_freq;
- if (INTEL_GEN(i915) >= 9) {
+ if (GRAPHICS_VER(i915) >= 9) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq /= GEN9_FREQ_SCALER;
max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -606,7 +606,7 @@ int live_rps_frequency_cs(void *arg)
int err = 0;
/*
- * The premise is that the GPU does change freqency at our behest.
+ * The premise is that the GPU does change frequency at our behest.
* Let's check there is a correspondence between the requested
* frequency, the actual frequency, and the observed clock rate.
*/
@@ -614,7 +614,7 @@ int live_rps_frequency_cs(void *arg)
if (!intel_rps_is_enabled(rps))
return 0;
- if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+ if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
return 0;
if (CPU_LATENCY >= 0)
@@ -747,7 +747,7 @@ int live_rps_frequency_srm(void *arg)
int err = 0;
/*
- * The premise is that the GPU does change freqency at our behest.
+ * The premise is that the GPU does change frequency at our behest.
* Let's check there is a correspondence between the requested
* frequency, the actual frequency, and the observed clock rate.
*/
@@ -755,7 +755,7 @@ int live_rps_frequency_srm(void *arg)
if (!intel_rps_is_enabled(rps))
return 0;
- if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
+ if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
return 0;
if (CPU_LATENCY >= 0)
@@ -1031,7 +1031,7 @@ int live_rps_interrupt(void *arg)
* First, let's check whether or not we are receiving interrupts.
*/
- if (!intel_rps_has_interrupts(rps) || INTEL_GEN(gt->i915) < 6)
+ if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
intel_gt_pm_get(gt);
@@ -1136,10 +1136,10 @@ int live_rps_power(void *arg)
* that theory.
*/
- if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
+ if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
- if (!librapl_energy_uJ())
+ if (!librapl_supported(gt->i915))
return 0;
if (igt_spinner_init(&spin, gt))
@@ -1240,7 +1240,7 @@ int live_rps_dynamic(void *arg)
* moving parts into dynamic reclocking based on load.
*/
- if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
+ if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
if (igt_spinner_init(&spin, gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 9adbd9d147be..64da0c91dec1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -457,12 +457,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (INTEL_GEN(rq->engine->i915) >= 8) {
+ if (GRAPHICS_VER(rq->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = 0;
*cs++ = value;
- } else if (INTEL_GEN(rq->engine->i915) >= 4) {
+ } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = addr;
@@ -992,7 +992,7 @@ static int live_hwsp_read(void *arg)
* even across multiple wraps.
*/
- if (INTEL_GEN(gt->i915) < 8) /* CS convenience [SRM/LRM] */
+ if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */
return 0;
tl = intel_timeline_create(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 19850489a3fc..c30754daf4b1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -145,7 +145,7 @@ read_nonprivs(struct intel_context *ce)
goto err_req;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(engine->i915) >= 8)
+ if (GRAPHICS_VER(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
@@ -546,7 +546,7 @@ retry:
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
- if (INTEL_GEN(engine->i915) >= 8)
+ if (GRAPHICS_VER(engine->i915) >= 8)
lrm++, srm++;
pr_debug("%s: Writing garbage to %x\n",
@@ -749,7 +749,7 @@ static int live_dirty_whitelist(void *arg)
/* Can the user write to the whitelisted registers? */
- if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
for_each_engine(engine, gt, id) {
@@ -829,7 +829,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
goto err_req;
srm = MI_STORE_REGISTER_MEM;
- if (INTEL_GEN(engine->i915) >= 8)
+ if (GRAPHICS_VER(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
@@ -927,7 +927,7 @@ err_batch:
struct regmask {
i915_reg_t reg;
- unsigned long gen_mask;
+ u8 graphics_ver;
};
static bool find_reg(struct drm_i915_private *i915,
@@ -938,7 +938,7 @@ static bool find_reg(struct drm_i915_private *i915,
u32 offset = i915_mmio_reg_offset(reg);
while (count--) {
- if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
+ if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
i915_mmio_reg_offset(tbl->reg) == offset)
return true;
tbl++;
@@ -951,8 +951,8 @@ static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
/* Alas, we must pardon some whitelists. Mistakes already made */
static const struct regmask pardon[] = {
- { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
- { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
+ { GEN9_CTX_PREEMPT_REG, 9 },
+ { GEN8_L3SQCREG4, 9 },
};
return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
@@ -974,7 +974,7 @@ static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
/* Some registers do not seem to behave and our writes unreadable */
static const struct regmask wo[] = {
- { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
+ { GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },
};
return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
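
A note on the regmask conversion above: INTEL_GEN_MASK(9, 9) encoded a range of generations as a bitmask, while the new u8 graphics_ver field matches exactly one version, so a register that needs pardoning on several versions now takes one table entry per version. A minimal stand-alone sketch of the new matching logic, with uint32_t standing in for i915_reg_t (an assumption made for illustration only):

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors struct regmask after the conversion: one exact version per
     * entry instead of an INTEL_GEN_MASK() range. */
    struct regmask {
        uint32_t reg;          /* register offset; stands in for i915_reg_t */
        uint8_t graphics_ver;
    };

    static bool find_reg(uint8_t ver, uint32_t offset,
                         const struct regmask *tbl, unsigned int count)
    {
        while (count--) {
            /* exact-version match replaces the old range-mask test */
            if (ver == tbl->graphics_ver && tbl->reg == offset)
                return true;
            tbl++;
        }
        return false;
    }
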
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index f8f02aab842b..0683b27a3890 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -8,6 +8,7 @@
#include <linux/shmem_fs.h>
#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_lmem.h"
#include "shmem_utils.h"
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
@@ -39,7 +40,8 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
+ I915_MAP_WC : I915_MAP_WB);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
new file mode 100644
index 000000000000..90efef8a73e4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_ACTIONS_ABI_H
+#define _ABI_GUC_ACTIONS_ABI_H
+
+enum intel_guc_action {
+ INTEL_GUC_ACTION_DEFAULT = 0x0,
+ INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
+ INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
+ INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+ INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
+ INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
+ INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
+ INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
+ INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+ INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+ INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
+ INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
+ INTEL_GUC_ACTION_LIMIT
+};
+
+enum intel_guc_preempt_options {
+ INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
+ INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
+};
+
+enum intel_guc_report_status {
+ INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
+ INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
+ INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
+ INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
+};
+
+enum intel_guc_sleep_state_status {
+ INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
+ INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
+ INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
+#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
+#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
+#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
+#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
+#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
+
+#endif /* _ABI_GUC_ACTIONS_ABI_H */
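
The enum above is only half of the convention: an H2G request is an array of u32 in which dword 0 carries the action code and the remaining dwords carry its parameters. A short sketch of that shape, modeled on the intel_guc_auth_huc() pattern visible later in this patch (kernel context assumed; a sketch, not a drop-in implementation):

    static int guc_send_authenticate_huc(struct intel_guc *guc, u32 rsa_offset)
    {
        u32 action[] = {
            INTEL_GUC_ACTION_AUTHENTICATE_HUC,  /* dword 0: action code */
            rsa_offset,                         /* dword 1: parameter */
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
    }
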
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
new file mode 100644
index 000000000000..d38935f47ecf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_COMMUNICATION_CTB_ABI_H
+#define _ABI_GUC_COMMUNICATION_CTB_ABI_H
+
+#include <linux/types.h>
+
+/**
+ * DOC: CTB based communication
+ *
+ * The CTB (command transport buffer) communication between Host and GuC
+ * is based on u32 data stream written to the shared buffer. One buffer can
+ * be used to transmit data only in one direction (one-directional channel).
+ *
+ * The current status of each buffer is stored in the buffer descriptor.
+ * The buffer descriptor holds tail and head fields that represent the
+ * active data stream. The tail field is updated by the data producer
+ * (sender), and the head field is updated by the data consumer (receiver)::
+ *
+ * +------------+
+ * | DESCRIPTOR | +=================+============+========+
+ * +============+ | | MESSAGE(s) | |
+ * | address |--------->+=================+============+========+
+ * +------------+
+ * | head | ^-----head--------^
+ * +------------+
+ * | tail | ^---------tail-----------------^
+ * +------------+
+ * | size | ^---------------size--------------------^
+ * +------------+
+ *
+ * Each message in the data stream starts with a single u32 treated as a
+ * header, followed by an optional set of u32 data that makes up the
+ * message-specific payload::
+ *
+ * +------------+---------+---------+---------+
+ * | MESSAGE |
+ * +------------+---------+---------+---------+
+ * | msg[0] | [1] | ... | [n-1] |
+ * +------------+---------+---------+---------+
+ * | MESSAGE | MESSAGE PAYLOAD |
+ * + HEADER +---------+---------+---------+
+ * | | 0 | ... | n |
+ * +======+=====+=========+=========+=========+
+ * | 31:16| code| | | |
+ * +------+-----+ | | |
+ * | 15:5|flags| | | |
+ * +------+-----+ | | |
+ * | 4:0| len| | | |
+ * +------+-----+---------+---------+---------+
+ *
+ * ^-------------len-------------^
+ *
+ * The message header consists of:
+ *
+ * - **len**, indicates length of the message payload (in u32)
+ * - **code**, indicates message code
+ * - **flags**, holds various bits to control message handling
+ */
+
+/*
+ * Describes a single command transport buffer.
+ * Used by both guc-master and clients.
+ */
+struct guc_ct_buffer_desc {
+ u32 addr; /* gfx address */
+ u64 host_private; /* host private data */
+ u32 size; /* size in bytes */
+ u32 head; /* offset updated by GuC */
+ u32 tail; /* offset updated by owner */
+ u32 is_in_error; /* error indicator */
+ u32 reserved1;
+ u32 reserved2;
+ u32 owner; /* id of the channel owner */
+ u32 owner_sub_id; /* owner-defined field for extra tracking */
+ u32 reserved[5];
+} __packed;
+
+/* Type of command transport buffer */
+#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u
+#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u
+
+/*
+ * Definition of the command transport message header (DW0)
+ *
+ * bit[4..0] message len (in dwords)
+ * bit[7..5] reserved
+ * bit[8] response (G2H only)
+ * bit[8] write fence to desc (H2G only)
+ * bit[9] write status to H2G buff (H2G only)
+ * bit[10] send status back via G2H (H2G only)
+ * bit[15..11] reserved
+ * bit[31..16] action code
+ */
+#define GUC_CT_MSG_LEN_SHIFT 0
+#define GUC_CT_MSG_LEN_MASK 0x1F
+#define GUC_CT_MSG_IS_RESPONSE (1 << 8)
+#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8)
+#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9)
+#define GUC_CT_MSG_SEND_STATUS (1 << 10)
+#define GUC_CT_MSG_ACTION_SHIFT 16
+#define GUC_CT_MSG_ACTION_MASK 0xFFFF
+
+#endif /* _ABI_GUC_COMMUNICATION_CTB_ABI_H */
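
To make the DW0 layout above concrete, here is a minimal stand-alone sketch that packs and unpacks a header with the masks just defined (the GUC_CT_MSG_SEND_STATUS flag mirrors what ct_write() sets later in this patch):

    #include <stdint.h>

    #define GUC_CT_MSG_LEN_SHIFT     0
    #define GUC_CT_MSG_LEN_MASK      0x1F
    #define GUC_CT_MSG_SEND_STATUS   (1 << 10)
    #define GUC_CT_MSG_ACTION_SHIFT  16
    #define GUC_CT_MSG_ACTION_MASK   0xFFFF

    /* len counts the payload dwords that follow the header */
    static uint32_t ct_pack_header(uint32_t action, uint32_t len)
    {
        return (len << GUC_CT_MSG_LEN_SHIFT) |
               GUC_CT_MSG_SEND_STATUS |
               (action << GUC_CT_MSG_ACTION_SHIFT);
    }

    static uint32_t ct_header_len(uint32_t header)
    {
        return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
    }

    static uint32_t ct_header_action(uint32_t header)
    {
        return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
    }
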
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
new file mode 100644
index 000000000000..be066a62e9e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_COMMUNICATION_MMIO_ABI_H
+#define _ABI_GUC_COMMUNICATION_MMIO_ABI_H
+
+/**
+ * DOC: MMIO based communication
+ *
+ * The MMIO based communication between Host and GuC uses software scratch
+ * registers, where the first register holds data treated as the message
+ * header, and the other registers are used to hold the message payload.
+ *
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 8 parameters and the GuC FW
+ * itself uses an 8-element array to store the H2G message.
+ *
+ * +-----------+---------+---------+---------+
+ * | MMIO[0] | MMIO[1] | ... | MMIO[n] |
+ * +-----------+---------+---------+---------+
+ * | header | optional payload |
+ * +======+====+=========+=========+=========+
+ * | 31:28|type| | | |
+ * +------+----+ | | |
+ * | 27:16|data| | | |
+ * +------+----+ | | |
+ * | 15:0|code| | | |
+ * +------+----+---------+---------+---------+
+ *
+ * The message header consists of:
+ *
+ * - **type**, indicates message type
+ * - **code**, indicates message code, which is specific to the **type**
+ * - **data**, indicates message data, optional, depends on **code**
+ *
+ * The following message **types** are supported:
+ *
+ * - **REQUEST**, indicates Host-to-GuC request, the requested GuC action
+ * code must be provided in the **code** field. Optional action-specific
+ * parameters can be provided in the remaining payload registers or the
+ * **data** field.
+ *
+ * - **RESPONSE**, indicates GuC-to-Host response from an earlier GuC request,
+ * action response status will be provided in **code** field. Optional
+ * response data can be returned in remaining payload registers or **data**
+ * field.
+ */
+
+#define GUC_MAX_MMIO_MSG_LEN 8
+
+#endif /* _ABI_GUC_COMMUNICATION_MMIO_ABI_H */
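
A sketch of the register usage this protocol implies: the header goes into scratch register 0, parameters into the following registers, and the GuC overwrites register 0 with a RESPONSE header when done. scratch_write()/scratch_read() below are hypothetical stand-ins for the real intel_uncore accessors, used for illustration only:

    #include <assert.h>
    #include <stdint.h>

    #define GUC_MAX_MMIO_MSG_LEN 8

    /* hypothetical scratch-register accessors, not the i915 API */
    extern void scratch_write(unsigned int reg, uint32_t val);
    extern uint32_t scratch_read(unsigned int reg);

    static uint32_t mmio_send(const uint32_t *request, unsigned int len)
    {
        unsigned int i;

        /* no message may use more than 8 scratch registers */
        assert(len >= 1 && len <= GUC_MAX_MMIO_MSG_LEN);

        for (i = 0; i < len; i++)   /* header first, payload after */
            scratch_write(i, request[i]);

        /* ... notify the GuC, then poll until the header in register 0
         * flips to a RESPONSE type (omitted) ... */

        return scratch_read(0);     /* response header */
    }
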
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
new file mode 100644
index 000000000000..488b6061ee89
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_ERRORS_ABI_H
+#define _ABI_GUC_ERRORS_ABI_H
+
+enum intel_guc_response_status {
+ INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+ INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
+};
+
+#endif /* _ABI_GUC_ERRORS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
new file mode 100644
index 000000000000..775e21f3058c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_MESSAGES_ABI_H
+#define _ABI_GUC_MESSAGES_ABI_H
+
+#define INTEL_GUC_MSG_TYPE_SHIFT 28
+#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
+#define INTEL_GUC_MSG_DATA_SHIFT 16
+#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
+#define INTEL_GUC_MSG_CODE_SHIFT 0
+#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
+
+enum intel_guc_msg_type {
+ INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
+ INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
+};
+
+#endif /* _ABI_GUC_MESSAGES_ABI_H */
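
These masks carve the header dword into the type/data/code fields shared by the MMIO protocol and CT responses. A stand-alone sketch of packing a REQUEST and testing for a RESPONSE, using only the definitions above:

    #include <stdbool.h>
    #include <stdint.h>

    #define INTEL_GUC_MSG_TYPE_SHIFT  28
    #define INTEL_GUC_MSG_TYPE_MASK   (0xFu << INTEL_GUC_MSG_TYPE_SHIFT)
    #define INTEL_GUC_MSG_DATA_SHIFT  16
    #define INTEL_GUC_MSG_DATA_MASK   (0xFFFu << INTEL_GUC_MSG_DATA_SHIFT)
    #define INTEL_GUC_MSG_CODE_SHIFT  0
    #define INTEL_GUC_MSG_CODE_MASK   (0xFFFFu << INTEL_GUC_MSG_CODE_SHIFT)

    #define INTEL_GUC_MSG_TYPE_REQUEST   0x0u
    #define INTEL_GUC_MSG_TYPE_RESPONSE  0xFu

    static uint32_t guc_msg_request(uint32_t code, uint32_t data)
    {
        return (INTEL_GUC_MSG_TYPE_REQUEST << INTEL_GUC_MSG_TYPE_SHIFT) |
               ((data << INTEL_GUC_MSG_DATA_SHIFT) & INTEL_GUC_MSG_DATA_MASK) |
               ((code << INTEL_GUC_MSG_CODE_SHIFT) & INTEL_GUC_MSG_CODE_MASK);
    }

    static bool guc_msg_is_response(uint32_t msg)
    {
        return (msg & INTEL_GUC_MSG_TYPE_MASK) ==
               (INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT);
    }
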
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 78305b2ec89d..f147cb389a20 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -60,15 +60,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
enum forcewake_domains fw_domains = 0;
unsigned int i;
- if (INTEL_GEN(gt->i915) >= 11) {
- guc->send_regs.base =
- i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
- guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
- } else {
- guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
- guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
- BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
- }
+ GEM_BUG_ON(!guc->send_regs.base);
+ GEM_BUG_ON(!guc->send_regs.count);
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
@@ -96,12 +89,9 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
spin_lock_irq(&gt->irq_lock);
- if (!guc->interrupts.enabled) {
- WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
- gt->pm_guc_events);
- guc->interrupts.enabled = true;
- gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
- }
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+ gt->pm_guc_events);
+ gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
spin_unlock_irq(&gt->irq_lock);
}
@@ -112,7 +102,6 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
spin_lock_irq(&gt->irq_lock);
- guc->interrupts.enabled = false;
gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
@@ -134,18 +123,14 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc)
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
spin_lock_irq(&gt->irq_lock);
- if (!guc->interrupts.enabled) {
- u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
-
- WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
- intel_uncore_write(gt->uncore,
- GEN11_GUC_SG_INTR_ENABLE, events);
- intel_uncore_write(gt->uncore,
- GEN11_GUC_SG_INTR_MASK, ~events);
- guc->interrupts.enabled = true;
- }
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_ENABLE, events);
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_MASK, ~events);
spin_unlock_irq(&gt->irq_lock);
}
@@ -154,7 +139,6 @@ static void gen11_disable_guc_interrupts(struct intel_guc *guc)
struct intel_gt *gt = guc_to_gt(guc);
spin_lock_irq(&gt->irq_lock);
- guc->interrupts.enabled = false;
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
@@ -176,16 +160,23 @@ void intel_guc_init_early(struct intel_guc *guc)
mutex_init(&guc->send_mutex);
spin_lock_init(&guc->irq_lock);
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
guc->interrupts.reset = gen11_reset_guc_interrupts;
guc->interrupts.enable = gen11_enable_guc_interrupts;
guc->interrupts.disable = gen11_disable_guc_interrupts;
+ guc->send_regs.base =
+ i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
+ guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
+
} else {
guc->notify_reg = GUC_SEND_INTERRUPT;
guc->interrupts.reset = gen9_reset_guc_interrupts;
guc->interrupts.enable = gen9_enable_guc_interrupts;
guc->interrupts.disable = gen9_disable_guc_interrupts;
+ guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+ guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+ BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
}
}
@@ -469,22 +460,6 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
return 0;
}
-int intel_guc_sample_forcewake(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
- u32 action[2];
-
- action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
- /* WaRsDisableCoarsePowerGating:skl,cnl */
- if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- action[1] = 0;
- else
- /* bit 0 and 1 are for Render and Media domain separately */
- action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
/**
* intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
* @guc: intel_guc structure
@@ -682,7 +657,9 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
if (IS_ERR(vma))
return PTR_ERR(vma);
- vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(guc_to_gt(guc)->i915,
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index bc2ba7d0626c..4abc59f6f3cd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -33,7 +33,6 @@ struct intel_guc {
unsigned int msg_enabled_mask;
struct {
- bool enabled;
void (*reset)(struct intel_guc *guc);
void (*enable)(struct intel_guc *guc);
void (*disable)(struct intel_guc *guc);
@@ -128,7 +127,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len);
-int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 17526717368c..9abfbc6edbd6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -6,6 +6,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_lrc.h"
#include "intel_guc_ads.h"
+#include "intel_guc_fwif.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -104,7 +105,7 @@ static void guc_mapping_table_init(struct intel_gt *gt,
GUC_MAX_INSTANCES_PER_CLASS;
for_each_engine(engine, gt, id) {
- u8 guc_class = engine->class;
+ u8 guc_class = engine_class_to_guc_class(engine->class);
system_info->mapping_table[guc_class][engine->instance] =
engine->instance;
@@ -124,7 +125,7 @@ static void __guc_ads_init(struct intel_guc *guc)
struct __guc_ads_blob *blob = guc->ads_blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
- u8 engine_class;
+ u8 engine_class, guc_class;
/* GuC scheduling policies */
guc_policies_init(&blob->policies);
@@ -140,29 +141,32 @@ static void __guc_ads_init(struct intel_guc *guc)
for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
if (engine_class == OTHER_CLASS)
continue;
+
+ guc_class = engine_class_to_guc_class(engine_class);
+
/*
* TODO: Set context pointer to default state to allow
* GuC to re-init guilty contexts after internal reset.
*/
- blob->ads.golden_context_lrca[engine_class] = 0;
- blob->ads.eng_state_size[engine_class] =
+ blob->ads.golden_context_lrca[guc_class] = 0;
+ blob->ads.eng_state_size[guc_class] =
intel_engine_context_size(guc_to_gt(guc),
engine_class) -
skipped_size;
}
/* System info */
- blob->system_info.engine_enabled_masks[RENDER_CLASS] = 1;
- blob->system_info.engine_enabled_masks[COPY_ENGINE_CLASS] = 1;
- blob->system_info.engine_enabled_masks[VIDEO_DECODE_CLASS] = VDBOX_MASK(gt);
- blob->system_info.engine_enabled_masks[VIDEO_ENHANCEMENT_CLASS] = VEBOX_MASK(gt);
+ blob->system_info.engine_enabled_masks[GUC_RENDER_CLASS] = 1;
+ blob->system_info.engine_enabled_masks[GUC_BLITTER_CLASS] = 1;
+ blob->system_info.engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt);
+ blob->system_info.engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] =
hweight8(gt->info.sseu.slice_mask);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] =
gt->info.vdbox_sfc_access;
- if (INTEL_GEN(i915) >= 12 && !IS_DGFX(i915)) {
+ if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
u32 distdbreg = intel_uncore_read(gt->uncore,
GEN12_DIST_DBS_POPULATED);
blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] =
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index fa9e048cc65f..8f7b148fef58 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -7,14 +7,62 @@
#include "intel_guc_ct.h"
#include "gt/intel_gt.h"
+static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
+{
+ return container_of(ct, struct intel_guc, ct);
+}
+
+static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
+{
+ return guc_to_gt(ct_to_guc(ct));
+}
+
+static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
+{
+ return ct_to_gt(ct)->i915;
+}
+
+static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
+{
+ return &ct_to_i915(ct)->drm;
+}
+
#define CT_ERROR(_ct, _fmt, ...) \
- DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
+ drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
- DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
+ drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...) do { } while (0)
#endif
+#define CT_PROBE_ERROR(_ct, _fmt, ...) \
+ i915_probe_error(ct_to_i915(ct), "CT: " _fmt, ##__VA_ARGS__)
+
+/**
+ * DOC: CTB Blob
+ *
+ * We allocate a single blob to hold both CTB descriptors and buffers:
+ *
+ * +--------+-----------------------------------------------+------+
+ * | offset | contents | size |
+ * +========+===============================================+======+
+ * | 0x0000 | H2G `CTB Descriptor`_ (send) | |
+ * +--------+-----------------------------------------------+ 4K |
+ * | 0x0800 | G2H `CTB Descriptor`_ (recv) | |
+ * +--------+-----------------------------------------------+------+
+ * | 0x1000 | H2G `CT Buffer`_ (send) | n*4K |
+ * | | | |
+ * +--------+-----------------------------------------------+------+
+ * | 0x1000 | G2H `CT Buffer`_ (recv) | m*4K |
+ * | + n*4K | | |
+ * +--------+-----------------------------------------------+------+
+ *
+ * The size of each `CT Buffer`_ must be a multiple of 4K.
+ * As we don't expect too many messages, use the minimum sizes for now.
+ */
+#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
+#define CTB_H2G_BUFFER_SIZE (SZ_4K)
+#define CTB_G2H_BUFFER_SIZE (SZ_4K)
struct ct_request {
struct list_head link;
@@ -24,8 +72,9 @@ struct ct_request {
u32 *response_buf;
};
-struct ct_incoming_request {
+struct ct_incoming_msg {
struct list_head link;
+ u32 size;
u32 msg[];
};
@@ -33,6 +82,7 @@ enum { CTB_SEND = 0, CTB_RECV = 1 };
enum { CTB_OWNER_HOST = 0 };
+static void ct_receive_tasklet_func(struct tasklet_struct *t);
static void ct_incoming_request_worker_func(struct work_struct *w);
/**
@@ -41,30 +91,13 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
*/
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
+ spin_lock_init(&ct->ctbs.send.lock);
+ spin_lock_init(&ct->ctbs.recv.lock);
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
-}
-
-static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
-{
- return container_of(ct, struct intel_guc, ct);
-}
-
-static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
-{
- return guc_to_gt(ct_to_guc(ct));
-}
-
-static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
-{
- return ct_to_gt(ct)->i915;
-}
-
-static inline struct device *ct_to_dev(struct intel_guc_ct *ct)
-{
- return ct_to_i915(ct)->drm.dev;
+ tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
}
static inline const char *guc_ct_buffer_type_to_str(u32 type)
@@ -88,11 +121,22 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
desc->owner = CTB_OWNER_HOST;
}
-static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
+static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb, u32 cmds_addr)
{
- desc->head = 0;
- desc->tail = 0;
- desc->is_in_error = 0;
+ guc_ct_buffer_desc_init(ctb->desc, cmds_addr, ctb->size);
+}
+
+static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
+ struct guc_ct_buffer_desc *desc,
+ u32 *cmds, u32 size)
+{
+ GEM_BUG_ON(size % 4);
+
+ ctb->desc = desc;
+ ctb->cmds = cmds;
+ ctb->size = size;
+
+ guc_ct_buffer_reset(ctb, 0);
}
static int guc_action_register_ct_buffer(struct intel_guc *guc,
@@ -153,48 +197,42 @@ static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
+ struct guc_ct_buffer_desc *desc;
+ u32 blob_size;
+ u32 cmds_size;
void *blob;
+ u32 *cmds;
int err;
- int i;
GEM_BUG_ON(ct->vma);
- /* We allocate 1 page to hold both descriptors and both buffers.
- * ___________.....................
- * |desc (SEND)| :
- * |___________| PAGE/4
- * :___________....................:
- * |desc (RECV)| :
- * |___________| PAGE/4
- * :_______________________________:
- * |cmds (SEND) |
- * | PAGE/4
- * |_______________________________|
- * |cmds (RECV) |
- * | PAGE/4
- * |_______________________________|
- *
- * Each message can use a maximum of 32 dwords and we don't expect to
- * have more than 1 in flight at any time, so we have enough space.
- * Some logic further ahead will rely on the fact that there is only 1
- * page and that it is always mapped, so if the size is changed the
- * other code will need updating as well.
- */
-
- err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
+ blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
+ err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
if (unlikely(err)) {
- CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err);
+ CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
+ blob_size, ERR_PTR(err));
return err;
}
- CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma));
+ CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);
- /* store pointers to desc and cmds */
- for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
- GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
- ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
- }
+ /* store pointers to desc and cmds for send ctb */
+ desc = blob;
+ cmds = blob + 2 * CTB_DESC_SIZE;
+ cmds_size = CTB_H2G_BUFFER_SIZE;
+ CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send",
+ ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
+
+ guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size);
+
+ /* store pointers to desc and cmds for recv ctb */
+ desc = blob + CTB_DESC_SIZE;
+ cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
+ cmds_size = CTB_G2H_BUFFER_SIZE;
+ CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv",
+ ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
+
+ guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size);
return 0;
}
@@ -209,6 +247,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
GEM_BUG_ON(ct->enabled);
+ tasklet_kill(&ct->receive_tasklet);
i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
memset(ct, 0, sizeof(*ct));
}
@@ -222,37 +261,38 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- u32 base, cmds, size;
+ u32 base, cmds;
+ void *blob;
int err;
- int i;
GEM_BUG_ON(ct->enabled);
/* vma should be already allocated and map'ed */
GEM_BUG_ON(!ct->vma);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
base = intel_guc_ggtt_offset(guc, ct->vma);
- /* (re)initialize descriptors
- * cmds buffers are in the second half of the blob page
- */
- for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
- GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
- size = PAGE_SIZE / 4;
- CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
- guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
- }
+ /* blob should start with send descriptor */
+ blob = __px_vaddr(ct->vma->obj);
+ GEM_BUG_ON(blob != ct->ctbs.send.desc);
+
+ /* (re)initialize descriptors */
+ cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
+ guc_ct_buffer_reset(&ct->ctbs.send, cmds);
+
+ cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
+ guc_ct_buffer_reset(&ct->ctbs.recv, cmds);
/*
* Register both CT buffers starting with RECV buffer.
* Descriptors are in first half of the blob.
*/
- err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
+ err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.recv.desc, blob),
INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
goto err_out;
- err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
+ err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.send.desc, blob),
INTEL_GUC_CT_BUFFER_TYPE_SEND);
if (unlikely(err))
goto err_deregister;
@@ -264,7 +304,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
err_deregister:
ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
- CT_ERROR(ct, "Failed to open open CT channel (err=%d)\n", err);
+ CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
return err;
}
@@ -292,6 +332,28 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
return ++ct->requests.last_fence;
}
+static void write_barrier(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
+ GEM_BUG_ON(guc->send_regs.fw_domains);
+ /*
+ * This register is used by the i915 and GuC for MMIO based
+ * communication. Once we are in this code CTBs are the only
+ * method the i915 uses to communicate with the GuC so it is
+ * safe to write to this register (a value of 0 is a NOP for MMIO
+ * communication). If we ever start mixing CTBs and MMIOs, a new
+ * register will have to be chosen.
+ */
+ intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
+ } else {
+ /* wmb() sufficient for a barrier if in smem */
+ wmb();
+ }
+}
+
/**
* DOC: CTB Host to GuC request
*
@@ -313,14 +375,13 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
static int ct_write(struct intel_guc_ct *ct,
const u32 *action,
u32 len /* in dwords */,
- u32 fence,
- bool want_response)
+ u32 fence)
{
- struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
struct guc_ct_buffer_desc *desc = ctb->desc;
u32 head = desc->head;
u32 tail = desc->tail;
- u32 size = desc->size;
+ u32 size = ctb->size;
u32 used;
u32 header;
u32 *cmds = ctb->cmds;
@@ -329,7 +390,7 @@ static int ct_write(struct intel_guc_ct *ct,
if (unlikely(desc->is_in_error))
return -EPIPE;
- if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+ if (unlikely(!IS_ALIGNED(head | tail, 4) ||
(tail | head) >= size))
goto corrupted;
@@ -358,8 +419,7 @@ static int ct_write(struct intel_guc_ct *ct,
* DW2+: action data
*/
header = (len << GUC_CT_MSG_LEN_SHIFT) |
- (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
- (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
+ GUC_CT_MSG_SEND_STATUS |
(action[0] << GUC_CT_MSG_ACTION_SHIFT);
CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
@@ -377,6 +437,12 @@ static int ct_write(struct intel_guc_ct *ct,
}
GEM_BUG_ON(tail > size);
+ /*
+ * make sure the H2G buffer update and the LRC tail update (if this is
+ * triggering a submission) are visible before updating the descriptor tail
+ */
+ write_barrier(ct);
+
/* now update desc tail (back in bytes) */
desc->tail = tail * 4;
return 0;
@@ -389,56 +455,6 @@ corrupted:
}
/**
- * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
- * @desc: buffer descriptor
- * @fence: response fence
- * @status: placeholder for status
- *
- * Guc will update CT buffer descriptor with new fence and status
- * after processing the command identified by the fence. Wait for
- * specified fence and then read from the descriptor status of the
- * command.
- *
- * Return:
- * * 0 response received (status is valid)
- * * -ETIMEDOUT no response within hardcoded timeout
- * * -EPROTO no response, CT buffer is in error
- */
-static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
- u32 fence,
- u32 *status)
-{
- int err;
-
- /*
- * Fast commands should complete in less than 10us, so sample quickly
- * up to that length of time, then switch to a slower sleep-wait loop.
- * No GuC command should ever take longer than 10ms.
- */
-#define done (READ_ONCE(desc->fence) == fence)
- err = wait_for_us(done, 10);
- if (err)
- err = wait_for(done, 10);
-#undef done
-
- if (unlikely(err)) {
- DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
- fence, desc->fence);
-
- if (WARN_ON(desc->is_in_error)) {
- /* Something went wrong with the messaging, try to reset
- * the buffer and hope for the best
- */
- guc_ct_buffer_desc_reset(desc);
- err = -EPROTO;
- }
- }
-
- *status = desc->status;
- return err;
-}
-
-/**
* wait_for_ct_request_update - Wait for CT request state update.
* @req: pointer to pending request
* @status: placeholder for status
@@ -481,8 +497,6 @@ static int ct_send(struct intel_guc_ct *ct,
u32 response_buf_size,
u32 *status)
{
- struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
- struct guc_ct_buffer_desc *desc = ctb->desc;
struct ct_request request;
unsigned long flags;
u32 fence;
@@ -493,26 +507,28 @@ static int ct_send(struct intel_guc_ct *ct,
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
+ spin_lock_irqsave(&ct->ctbs.send.lock, flags);
+
fence = ct_get_next_fence(ct);
request.fence = fence;
request.status = 0;
request.response_len = response_buf_size;
request.response_buf = response_buf;
- spin_lock_irqsave(&ct->requests.lock, flags);
+ spin_lock(&ct->requests.lock);
list_add_tail(&request.link, &ct->requests.pending);
- spin_unlock_irqrestore(&ct->requests.lock, flags);
+ spin_unlock(&ct->requests.lock);
+
+ err = ct_write(ct, action, len, fence);
+
+ spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);
- err = ct_write(ct, action, len, fence, !!response_buf);
if (unlikely(err))
goto unlink;
intel_guc_notify(ct_to_guc(ct));
- if (response_buf)
- err = wait_for_ct_request_update(&request, status);
- else
- err = wait_for_ctb_desc_update(desc, fence, status);
+ err = wait_for_ct_request_update(&request, status);
if (unlikely(err))
goto unlink;
@@ -547,7 +563,6 @@ unlink:
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct intel_guc *guc = ct_to_guc(ct);
u32 status = ~0; /* undefined */
int ret;
@@ -556,8 +571,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
return -ENODEV;
}
- mutex_lock(&guc->send_mutex);
-
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
@@ -567,7 +580,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
action[0], ret, ret);
}
- mutex_unlock(&guc->send_mutex);
return ret;
}
@@ -586,22 +598,42 @@ static inline bool ct_header_is_response(u32 header)
return !!(header & GUC_CT_MSG_IS_RESPONSE);
}
-static int ct_read(struct intel_guc_ct *ct, u32 *data)
+static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
{
- struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
+ struct ct_incoming_msg *msg;
+
+ msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
+ if (msg)
+ msg->size = num_dwords;
+ return msg;
+}
+
+static void ct_free_msg(struct ct_incoming_msg *msg)
+{
+ kfree(msg);
+}
+
+/*
+ * Return: number of remaining dwords available to read (0 if empty)
+ * or a negative error code on failure
+ */
+static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
+{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
struct guc_ct_buffer_desc *desc = ctb->desc;
u32 head = desc->head;
u32 tail = desc->tail;
- u32 size = desc->size;
+ u32 size = ctb->size;
u32 *cmds = ctb->cmds;
s32 available;
unsigned int len;
unsigned int i;
+ u32 header;
if (unlikely(desc->is_in_error))
return -EPIPE;
- if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+ if (unlikely(!IS_ALIGNED(head | tail, 4) ||
(tail | head) >= size))
goto corrupted;
@@ -612,8 +644,10 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
/* tail == head condition indicates empty */
available = tail - head;
- if (unlikely(available == 0))
- return -ENODATA;
+ if (unlikely(available == 0)) {
+ *msg = NULL;
+ return 0;
+ }
/* beware of buffer wrap case */
if (unlikely(available < 0))
@@ -621,14 +655,14 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
GEM_BUG_ON(available < 0);
- data[0] = cmds[head];
+ header = cmds[head];
head = (head + 1) % size;
/* message len with header */
- len = ct_header_get_len(data[0]) + 1;
+ len = ct_header_get_len(header) + 1;
if (unlikely(len > (u32)available)) {
CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
- 4, data,
+ 4, &header,
4 * (head + available - 1 > size ?
size - head : available - 1), &cmds[head],
4 * (head + available - 1 > size ?
@@ -636,14 +670,27 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
goto corrupted;
}
+ *msg = ct_alloc_msg(len);
+ if (!*msg) {
+ CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
+ 4, &header,
+ 4 * (head + available - 1 > size ?
+ size - head : available - 1), &cmds[head],
+ 4 * (head + available - 1 > size ?
+ available - 1 - size + head : 0), &cmds[0]);
+ return available;
+ }
+
+ (*msg)->msg[0] = header;
+
for (i = 1; i < len; i++) {
- data[i] = cmds[head];
+ (*msg)->msg[i] = cmds[head];
head = (head + 1) % size;
}
- CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
+ CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
desc->head = head * 4;
- return 0;
+ return available - len;
corrupted:
CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
@@ -670,39 +717,39 @@ corrupted:
* ^-----------------------len-----------------------^
*/
-static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
+static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
{
- u32 header = msg[0];
+ u32 header = response->msg[0];
u32 len = ct_header_get_len(header);
- u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
u32 fence;
u32 status;
u32 datalen;
struct ct_request *req;
+ unsigned long flags;
bool found = false;
+ int err = 0;
GEM_BUG_ON(!ct_header_is_response(header));
- GEM_BUG_ON(!in_irq());
/* Response payload shall at least include fence and status */
if (unlikely(len < 2)) {
- CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
+ CT_ERROR(ct, "Corrupted response (len %u)\n", len);
return -EPROTO;
}
- fence = msg[1];
- status = msg[2];
+ fence = response->msg[1];
+ status = response->msg[2];
datalen = len - 2;
/* Format of the status follows RESPONSE message */
if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
- CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
+ CT_ERROR(ct, "Corrupted response (status %#x)\n", status);
return -EPROTO;
}
CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
- spin_lock(&ct->requests.lock);
+ spin_lock_irqsave(&ct->requests.lock, flags);
list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
CT_DEBUG(ct, "request %u awaits response\n",
@@ -710,58 +757,75 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
continue;
}
if (unlikely(datalen > req->response_len)) {
- CT_ERROR(ct, "Response for %u is too long %*ph\n",
- req->fence, msgsize, msg);
- datalen = 0;
+ CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
+ req->fence, datalen, req->response_len);
+ datalen = min(datalen, req->response_len);
+ err = -EMSGSIZE;
}
if (datalen)
- memcpy(req->response_buf, msg + 3, 4 * datalen);
+ memcpy(req->response_buf, response->msg + 3, 4 * datalen);
req->response_len = datalen;
WRITE_ONCE(req->status, status);
found = true;
break;
}
- spin_unlock(&ct->requests.lock);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
+
+ if (!found) {
+ CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
+ return -ENOKEY;
+ }
+
+ if (unlikely(err))
+ return err;
- if (!found)
- CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
+ ct_free_msg(response);
return 0;
}
-static void ct_process_request(struct intel_guc_ct *ct,
- u32 action, u32 len, const u32 *payload)
+static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
struct intel_guc *guc = ct_to_guc(ct);
+ u32 header, action, len;
+ const u32 *payload;
int ret;
+ header = request->msg[0];
+ payload = &request->msg[1];
+ action = ct_header_get_action(header);
+ len = ct_header_get_len(header);
+
CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
- if (unlikely(ret))
- goto fail_unexpected;
break;
-
default:
-fail_unexpected:
- CT_ERROR(ct, "Unexpected request %x %*ph\n",
- action, 4 * len, payload);
+ ret = -EOPNOTSUPP;
break;
}
+
+ if (unlikely(ret)) {
+ CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
+ action, ERR_PTR(ret));
+ return ret;
+ }
+
+ ct_free_msg(request);
+ return 0;
}
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
unsigned long flags;
- struct ct_incoming_request *request;
- u32 header;
- u32 *payload;
+ struct ct_incoming_msg *request;
bool done;
+ int err;
spin_lock_irqsave(&ct->requests.lock, flags);
request = list_first_entry_or_null(&ct->requests.incoming,
- struct ct_incoming_request, link);
+ struct ct_incoming_msg, link);
if (request)
list_del(&request->link);
done = !!list_empty(&ct->requests.incoming);
@@ -770,14 +834,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
if (!request)
return true;
- header = request->msg[0];
- payload = &request->msg[1];
- ct_process_request(ct,
- ct_header_get_action(header),
- ct_header_get_len(header),
- payload);
+ err = ct_process_request(ct, request);
+ if (unlikely(err)) {
+ CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
+ ERR_PTR(err), 4 * request->size, request->msg);
+ ct_free_msg(request);
+ }
- kfree(request);
return done;
}
@@ -810,22 +873,11 @@ static void ct_incoming_request_worker_func(struct work_struct *w)
* ^-----------------------len-----------------------^
*/
-static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
+static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
- u32 header = msg[0];
- u32 len = ct_header_get_len(header);
- u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
- struct ct_incoming_request *request;
unsigned long flags;
- GEM_BUG_ON(ct_header_is_response(header));
-
- request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
- if (unlikely(!request)) {
- CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
- return 0; /* XXX: -ENOMEM ? */
- }
- memcpy(request->msg, msg, msgsize);
+ GEM_BUG_ON(ct_header_is_response(request->msg[0]));
spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->requests.incoming);
@@ -835,28 +887,74 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
return 0;
}
+static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
+{
+ u32 header = msg->msg[0];
+ int err;
+
+ if (ct_header_is_response(header))
+ err = ct_handle_response(ct, msg);
+ else
+ err = ct_handle_request(ct, msg);
+
+ if (unlikely(err)) {
+ CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
+ ERR_PTR(err), 4 * msg->size, msg->msg);
+ ct_free_msg(msg);
+ }
+}
+
+/*
+ * Return: number of remaining dwords available to read (0 if empty)
+ * or a negative error code on failure
+ */
+static int ct_receive(struct intel_guc_ct *ct)
+{
+ struct ct_incoming_msg *msg = NULL;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
+ ret = ct_read(ct, &msg);
+ spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
+ if (ret < 0)
+ return ret;
+
+ if (msg)
+ ct_handle_msg(ct, msg);
+
+ return ret;
+}
+
+static void ct_try_receive_message(struct intel_guc_ct *ct)
+{
+ int ret;
+
+ if (GEM_WARN_ON(!ct->enabled))
+ return;
+
+ ret = ct_receive(ct);
+ if (ret > 0)
+ tasklet_hi_schedule(&ct->receive_tasklet);
+}
+
+static void ct_receive_tasklet_func(struct tasklet_struct *t)
+{
+ struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
+
+ ct_try_receive_message(ct);
+}
+
/*
* When we're communicating with the GuC over CT, GuC uses events
* to notify us about new messages being posted on the RECV buffer.
*/
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
- u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
- int err = 0;
-
if (unlikely(!ct->enabled)) {
WARN(1, "Unexpected GuC event received while CT disabled!\n");
return;
}
- do {
- err = ct_read(ct, msg);
- if (err)
- break;
-
- if (ct_header_is_response(msg[0]))
- err = ct_handle_response(ct, msg);
- else
- err = ct_handle_request(ct, msg);
- } while (!err);
+ ct_try_receive_message(ct);
}
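
Putting the new DOC: CTB Blob layout in numbers: CTB_DESC_SIZE rounds the descriptor struct up to 2K, and both command buffers sit at their 4K minimum in this patch, which yields the offsets below. A stand-alone sketch, assuming those minimum sizes:

    #include <stddef.h>

    #define SZ_2K 0x800u
    #define SZ_4K 0x1000u
    #define CTB_DESC_SIZE        SZ_2K  /* ALIGN(sizeof(desc), SZ_2K) */
    #define CTB_H2G_BUFFER_SIZE  SZ_4K  /* minimum */
    #define CTB_G2H_BUFFER_SIZE  SZ_4K  /* minimum */

    static const size_t send_desc = 0;                     /* 0x0000 */
    static const size_t recv_desc = CTB_DESC_SIZE;         /* 0x0800 */
    static const size_t send_cmds = 2 * CTB_DESC_SIZE;     /* 0x1000 */
    static const size_t recv_cmds =
        2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;           /* 0x2000 */
    static const size_t blob_size =
        2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
        CTB_G2H_BUFFER_SIZE;                               /* 0x3000 */
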
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 494a51a5200f..cb222f202301 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -6,6 +6,7 @@
#ifndef _INTEL_GUC_CT_H_
#define _INTEL_GUC_CT_H_
+#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -27,12 +28,16 @@ struct intel_guc;
* record (command transport buffer descriptor) and the actual buffer which
* holds the commands.
*
+ * @lock: protects access to the commands buffer and buffer descriptor
* @desc: pointer to the buffer descriptor
* @cmds: pointer to the commands buffer
+ * @size: size of the commands buffer
*/
struct intel_guc_ct_buffer {
+ spinlock_t lock;
struct guc_ct_buffer_desc *desc;
u32 *cmds;
+ u32 size;
};
@@ -45,8 +50,13 @@ struct intel_guc_ct {
struct i915_vma *vma;
bool enabled;
- /* buffers for sending(0) and receiving(1) commands */
- struct intel_guc_ct_buffer ctbs[2];
+ /* buffers for sending and receiving commands */
+ struct {
+ struct intel_guc_ct_buffer send;
+ struct intel_guc_ct_buffer recv;
+ } ctbs;
+
+ struct tasklet_struct receive_tasklet;
struct {
u32 last_fence; /* last fence used to send request */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 2270d6b3b272..76fe766ad1bc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -30,7 +30,7 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
else
intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN(uncore->i915, 9)) {
+ if (GRAPHICS_VER(uncore->i915) == 9) {
/* DOP Clock Gating Enable for GuC clocks */
intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 79c560d9c0b6..e9a9d85e2aa3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -9,6 +9,13 @@
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
+#include "gt/intel_engine_types.h"
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_errors_abi.h"
+#include "abi/guc_communication_mmio_abi.h"
+#include "abi/guc_communication_ctb_abi.h"
+#include "abi/guc_messages_abi.h"
#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
#define GUC_CLIENT_PRIORITY_HIGH 1
@@ -26,6 +33,12 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+#define GUC_RENDER_CLASS 0
+#define GUC_VIDEO_CLASS 1
+#define GUC_VIDEOENHANCE_CLASS 2
+#define GUC_BLITTER_CLASS 3
+#define GUC_RESERVED_CLASS 4
+#define GUC_LAST_ENGINE_CLASS GUC_RESERVED_CLASS
#define GUC_MAX_ENGINE_CLASSES 16
#define GUC_MAX_INSTANCES_PER_CLASS 32
@@ -123,6 +136,25 @@
#define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
(((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)
+static inline u8 engine_class_to_guc_class(u8 class)
+{
+ BUILD_BUG_ON(GUC_RENDER_CLASS != RENDER_CLASS);
+ BUILD_BUG_ON(GUC_BLITTER_CLASS != COPY_ENGINE_CLASS);
+ BUILD_BUG_ON(GUC_VIDEO_CLASS != VIDEO_DECODE_CLASS);
+ BUILD_BUG_ON(GUC_VIDEOENHANCE_CLASS != VIDEO_ENHANCEMENT_CLASS);
+ GEM_BUG_ON(class > MAX_ENGINE_CLASS || class == OTHER_CLASS);
+
+ return class;
+}
+
+static inline u8 guc_class_to_engine_class(u8 guc_class)
+{
+ GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS);
+ GEM_BUG_ON(guc_class == GUC_RESERVED_CLASS);
+
+ return guc_class;
+}
+
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
@@ -207,104 +239,6 @@ struct guc_stage_desc {
u64 desc_private;
} __packed;
-/**
- * DOC: CTB based communication
- *
- * The CTB (command transport buffer) communication between Host and GuC
- * is based on u32 data stream written to the shared buffer. One buffer can
- * be used to transmit data only in one direction (one-directional channel).
- *
- * Current status of the each buffer is stored in the buffer descriptor.
- * Buffer descriptor holds tail and head fields that represents active data
- * stream. The tail field is updated by the data producer (sender), and head
- * field is updated by the data consumer (receiver)::
- *
- * +------------+
- * | DESCRIPTOR | +=================+============+========+
- * +============+ | | MESSAGE(s) | |
- * | address |--------->+=================+============+========+
- * +------------+
- * | head | ^-----head--------^
- * +------------+
- * | tail | ^---------tail-----------------^
- * +------------+
- * | size | ^---------------size--------------------^
- * +------------+
- *
- * Each message in data stream starts with the single u32 treated as a header,
- * followed by optional set of u32 data that makes message specific payload::
- *
- * +------------+---------+---------+---------+
- * | MESSAGE |
- * +------------+---------+---------+---------+
- * | msg[0] | [1] | ... | [n-1] |
- * +------------+---------+---------+---------+
- * | MESSAGE | MESSAGE PAYLOAD |
- * + HEADER +---------+---------+---------+
- * | | 0 | ... | n |
- * +======+=====+=========+=========+=========+
- * | 31:16| code| | | |
- * +------+-----+ | | |
- * | 15:5|flags| | | |
- * +------+-----+ | | |
- * | 4:0| len| | | |
- * +------+-----+---------+---------+---------+
- *
- * ^-------------len-------------^
- *
- * The message header consists of:
- *
- * - **len**, indicates length of the message payload (in u32)
- * - **code**, indicates message code
- * - **flags**, holds various bits to control message handling
- */
-
-/*
- * Describes single command transport buffer.
- * Used by both guc-master and clients.
- */
-struct guc_ct_buffer_desc {
- u32 addr; /* gfx address */
- u64 host_private; /* host private data */
- u32 size; /* size in bytes */
- u32 head; /* offset updated by GuC*/
- u32 tail; /* offset updated by owner */
- u32 is_in_error; /* error indicator */
- u32 fence; /* fence updated by GuC */
- u32 status; /* status updated by GuC */
- u32 owner; /* id of the channel owner */
- u32 owner_sub_id; /* owner-defined field for extra tracking */
- u32 reserved[5];
-} __packed;
-
-/* Type of command transport buffer */
-#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u
-#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u
-
-/*
- * Definition of the command transport message header (DW0)
- *
- * bit[4..0] message len (in dwords)
- * bit[7..5] reserved
- * bit[8] response (G2H only)
- * bit[8] write fence to desc (H2G only)
- * bit[9] write status to H2G buff (H2G only)
- * bit[10] send status back via G2H (H2G only)
- * bit[15..11] reserved
- * bit[31..16] action code
- */
-#define GUC_CT_MSG_LEN_SHIFT 0
-#define GUC_CT_MSG_LEN_MASK 0x1F
-#define GUC_CT_MSG_IS_RESPONSE (1 << 8)
-#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8)
-#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9)
-#define GUC_CT_MSG_SEND_STATUS (1 << 10)
-#define GUC_CT_MSG_ACTION_SHIFT 16
-#define GUC_CT_MSG_ACTION_MASK 0xFFFF
-
-#define GUC_FORCEWAKE_RENDER (1 << 0)
-#define GUC_FORCEWAKE_MEDIA (1 << 1)
-
#define GUC_POWER_UNSPECIFIED 0
#define GUC_POWER_D0 1
#define GUC_POWER_D1 2
@@ -480,120 +414,17 @@ struct guc_shared_ctx_data {
struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
} __packed;
-/**
- * DOC: MMIO based communication
- *
- * The MMIO based communication between Host and GuC uses software scratch
- * registers, where first register holds data treated as message header,
- * and other registers are used to hold message payload.
- *
- * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
- * but no H2G command takes more than 8 parameters and the GuC FW
- * itself uses an 8-element array to store the H2G message.
- *
- * +-----------+---------+---------+---------+
- * | MMIO[0] | MMIO[1] | ... | MMIO[n] |
- * +-----------+---------+---------+---------+
- * | header | optional payload |
- * +======+====+=========+=========+=========+
- * | 31:28|type| | | |
- * +------+----+ | | |
- * | 27:16|data| | | |
- * +------+----+ | | |
- * | 15:0|code| | | |
- * +------+----+---------+---------+---------+
- *
- * The message header consists of:
- *
- * - **type**, indicates message type
- * - **code**, indicates message code, is specific for **type**
- * - **data**, indicates message data, optional, depends on **code**
- *
- * The following message **types** are supported:
- *
- * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code
- * must be priovided in **code** field. Optional action specific parameters
- * can be provided in remaining payload registers or **data** field.
- *
- * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request,
- * action response status will be provided in **code** field. Optional
- * response data can be returned in remaining payload registers or **data**
- * field.
- */
-
-#define GUC_MAX_MMIO_MSG_LEN 8
-
-#define INTEL_GUC_MSG_TYPE_SHIFT 28
-#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
-#define INTEL_GUC_MSG_DATA_SHIFT 16
-#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
-#define INTEL_GUC_MSG_CODE_SHIFT 0
-#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
-
#define __INTEL_GUC_MSG_GET(T, m) \
(((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT)
#define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m)
#define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m)
#define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m)
-enum intel_guc_msg_type {
- INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
- INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
-};
-
#define __INTEL_GUC_MSG_TYPE_IS(T, m) \
(INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T)
#define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m)
#define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m)
-enum intel_guc_action {
- INTEL_GUC_ACTION_DEFAULT = 0x0,
- INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
- INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
- INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
- INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
- INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
- INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
- INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
- INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
- INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
- INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
- INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005,
- INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
- INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
- INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
- INTEL_GUC_ACTION_LIMIT
-};
-
-enum intel_guc_preempt_options {
- INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
- INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
-};
-
-enum intel_guc_report_status {
- INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
- INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
- INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
- INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
-};
-
-enum intel_guc_sleep_state_status {
- INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
- INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
- INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
-#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
-};
-
-#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
-#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
-#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
-#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
-
-enum intel_guc_response_status {
- INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
- INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
-};
-
#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \
(typecheck(u32, (m)) && \
((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
index 129e0cf7dfe2..64e0b86bf258 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -121,4 +121,3 @@ void intel_guc_log_debugfs_register(struct intel_guc_log *log,
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log);
}
-
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 92688a9b6717..7c8ff9792f7b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -11,6 +11,7 @@
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc.h"
#include "gt/intel_mocs.h"
@@ -264,6 +265,14 @@ static void guc_submission_tasklet(struct tasklet_struct *t)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ if (iir & GT_RENDER_USER_INTERRUPT) {
+ intel_engine_signal_breadcrumbs(engine);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+}
+
static void guc_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -423,32 +432,6 @@ void intel_guc_submission_fini(struct intel_guc *guc)
}
}
-static void guc_interrupts_capture(struct intel_gt *gt)
-{
- struct intel_uncore *uncore = gt->uncore;
- u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
- u32 dmask = irqs << 16 | irqs;
-
- GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
-
- /* Don't handle the ctx switch interrupt in GuC submission mode */
- intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
- intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
-}
-
-static void guc_interrupts_release(struct intel_gt *gt)
-{
- struct intel_uncore *uncore = gt->uncore;
- u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
- u32 dmask = irqs << 16 | irqs;
-
- GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
-
- /* Handle ctx switch interrupts again */
- intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
- intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
-}
-
static int guc_context_alloc(struct intel_context *ce)
{
return lrc_alloc(ce, ce->engine);
@@ -608,35 +591,6 @@ static int guc_resume(struct intel_engine_cs *engine)
static void guc_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = guc_submit_request;
- engine->schedule = i915_schedule;
- engine->execlists.tasklet.callback = guc_submission_tasklet;
-
- engine->reset.prepare = guc_reset_prepare;
- engine->reset.rewind = guc_reset_rewind;
- engine->reset.cancel = guc_reset_cancel;
- engine->reset.finish = guc_reset_finish;
-
- engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-
- /*
- * TODO: GuC supports timeslicing and semaphores as well, but they're
- * handled by the firmware so some minor tweaks are required before
- * enabling.
- *
- * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
- * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- */
-
- engine->emit_bb_start = gen8_emit_bb_start;
-
- /*
- * For the breadcrumb irq to work we need the interrupts to stay
- * enabled. However, on all platforms on which we'll have support for
- * GuC submission we don't allow disabling the interrupts at runtime, so
- * we're always safe with the current flow.
- */
- GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
static void guc_release(struct intel_engine_cs *engine)
@@ -658,19 +612,39 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->cops = &guc_context_ops;
engine->request_alloc = guc_request_alloc;
+ engine->schedule = i915_schedule;
+
+ engine->reset.prepare = guc_reset_prepare;
+ engine->reset.rewind = guc_reset_rewind;
+ engine->reset.cancel = guc_reset_cancel;
+ engine->reset.finish = guc_reset_finish;
+
engine->emit_flush = gen8_emit_flush_xcs;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
- if (INTEL_GEN(engine->i915) >= 12) {
+ if (GRAPHICS_VER(engine->i915) >= 12) {
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
engine->emit_flush = gen12_emit_flush_xcs;
}
engine->set_default_submission = guc_set_default_submission;
+
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+
+ /*
+ * TODO: GuC supports timeslicing and semaphores as well, but they're
+ * handled by the firmware so some minor tweaks are required before
+ * enabling.
+ *
+ * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ */
+
+ engine->emit_bb_start = gen8_emit_bb_start;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
{
- switch (INTEL_GEN(engine->i915)) {
+ switch (GRAPHICS_VER(engine->i915)) {
case 12:
engine->emit_flush = gen12_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
@@ -689,6 +663,7 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
static inline void guc_default_irqs(struct intel_engine_cs *engine)
{
engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+ intel_engine_set_irq_handler(engine, cs_irq_handler);
}
int intel_guc_submission_setup(struct intel_engine_cs *engine)
@@ -699,7 +674,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
* The setup relies on several assumptions (e.g. irqs always enabled)
* that are only valid on gen11+
*/
- GEM_BUG_ON(INTEL_GEN(i915) < 11);
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
@@ -721,9 +696,6 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
void intel_guc_submission_enable(struct intel_guc *guc)
{
guc_stage_desc_init(guc);
-
- /* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(guc_to_gt(guc));
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -734,8 +706,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
/* Note: By the time we're here, GuC may have already been reset */
- guc_interrupts_release(gt);
-
guc_stage_desc_fini(guc);
}
@@ -753,8 +723,3 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
{
guc->submission_selected = __guc_submission_selected(guc);
}
-
-bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
-{
- return engine->set_default_submission == guc_set_default_submission;
-}
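Many hunks in this commit, here and in the gvt/, cmd parser and debugfs files below, are the mechanical INTEL_GEN()/IS_GEN()/IS_GEN_RANGE() to GRAPHICS_VER()/IS_GRAPHICS_VER() conversion. Sketched for reference, the semantics the conversion assumes; the driver's exact definitions may differ slightly:

/* Assumed shape of the new helpers; illustrative, not the exact source. */
#define GRAPHICS_VER(i915)	(INTEL_INFO(i915)->graphics_ver)
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

/*
 * So the rewrites are one-to-one:
 *	INTEL_GEN(i915) >= 12		->	GRAPHICS_VER(i915) >= 12
 *	IS_GEN(i915, 9)			->	GRAPHICS_VER(i915) == 9
 *	IS_GEN_RANGE(i915, 6, 7)	->	IS_GRAPHICS_VER(i915, 6, 7)
 */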
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 5f7b9e6347d0..3f7005018939 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -20,7 +20,6 @@ void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
int intel_guc_submission_setup(struct intel_engine_cs *engine);
-bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 2126dd81ac38..fc5387b410a2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -43,7 +43,7 @@ void intel_huc_init_early(struct intel_huc *huc)
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
huc->status.mask = HUC_LOAD_SUCCESSFUL;
huc->status.value = HUC_LOAD_SUCCESSFUL;
@@ -82,7 +82,9 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
if (IS_ERR(vma))
return PTR_ERR(vma);
- vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(gt->i915,
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 6abb8f2dc33d..6d8b9233214e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -23,7 +23,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
return;
/* Don't enable GuC/HuC on pre-Gen12 */
- if (INTEL_GEN(i915) < 12) {
+ if (GRAPHICS_VER(i915) < 12) {
i915->params.enable_guc = 0;
return;
}
@@ -467,7 +467,7 @@ static int __uc_init_hw(struct intel_uc *uc)
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN(i915, 9))
+ if (GRAPHICS_VER(i915) == 9)
attempts = 3;
else
attempts = 1;
@@ -502,10 +502,6 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_huc_auth(huc);
- ret = intel_guc_sample_forcewake(guc);
- if (ret)
- goto err_communication;
-
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
@@ -529,8 +525,6 @@ static int __uc_init_hw(struct intel_uc *uc)
/*
* We've failed to load the firmware :(
*/
-err_communication:
- guc_disable_communication(guc);
err_log_capture:
__uc_capture_load_err_log(uc);
err_out:
@@ -558,9 +552,6 @@ static void __uc_fini_hw(struct intel_uc *uc)
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_disable(guc);
- if (guc_communication_enabled(guc))
- guc_disable_communication(guc);
-
__uc_sanitize(uc);
}
@@ -577,7 +568,6 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
if (!intel_guc_is_ready(guc))
return;
- guc_disable_communication(guc);
__uc_sanitize(uc);
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index ca9c9e27a43d..c4118b808268 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1006,7 +1006,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* update reg values in it into vregs, so LRIs in workload with
* inhibit context will restore with correct values
*/
- if (IS_GEN(s->engine->i915, 9) &&
+ if (GRAPHICS_VER(s->engine->i915) == 9 &&
intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1390,7 +1390,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (INTEL_GEN(s->engine->i915) >= 9) {
+ if (GRAPHICS_VER(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1418,7 +1418,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (GRAPHICS_VER(dev_priv) >= 9) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1446,7 +1446,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
{
if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (INTEL_GEN(s->engine->i915) >= 9)
+ if (GRAPHICS_VER(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index d4f883f35b95..8e65cd8258b9 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -223,7 +223,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (GRAPHICS_VER(dev_priv) >= 9) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 0889ad8291b0..11a8baba6822 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -151,7 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (GRAPHICS_VER(dev_priv) >= 9) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@@ -215,7 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (GRAPHICS_VER(dev_priv) >= 9) {
plane->tiled = val & PLANE_CTL_TILED_MASK;
fmt = skl_format_to_drm(
val & PLANE_CTL_FORMAT_MASK,
@@ -256,9 +256,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
- (INTEL_GEN(dev_priv) >= 9) ?
- (_PRI_PLANE_STRIDE_MASK >> 6) :
- _PRI_PLANE_STRIDE_MASK, plane->bpp);
+ (GRAPHICS_VER(dev_priv) >= 9) ?
+ (_PRI_PLANE_STRIDE_MASK >> 6) :
+ _PRI_PLANE_STRIDE_MASK, plane->bpp);
plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
_PIPE_H_SRCSZ_SHIFT;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9478c132d7b6..cc2c05e18206 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1055,12 +1055,12 @@ static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+ if (GRAPHICS_VER(dev_priv) == 9 || GRAPHICS_VER(dev_priv) == 10) {
u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
GAMW_ECO_ENABLE_64K_IPS_FIELD;
return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (GRAPHICS_VER(dev_priv) >= 11) {
/* 64K paging only controlled by IPS bit in PTE now. */
return true;
} else
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index e7c2babcee8b..cbac409f6c8a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -46,118 +46,6 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
-static struct intel_vgpu_type *
-intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
-{
- if (WARN_ON(type_group_id >= gvt->num_types))
- return NULL;
- return &gvt->types[type_group_id];
-}
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
-{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
- if (!type)
- return 0;
-
- return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n"
- "weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *gvt_type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
-{
- *intel_vgpu_type_groups = gvt_vgpu_type_groups;
- return true;
-}
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (WARN_ON(!group))
- goto unwind;
-
- group->name = type->name;
- group->attrs = gvt_type_attrs;
- gvt_vgpu_type_groups[i] = group;
- }
-
- return 0;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = gvt_vgpu_type_groups[j];
- kfree(group);
- }
-
- return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = gvt_vgpu_type_groups[i];
- gvt_vgpu_type_groups[i] = NULL;
- kfree(group);
- }
-}
-
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -169,8 +57,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
- .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
- .get_gvt_attrs = intel_get_gvt_attrs,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
@@ -274,7 +160,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
intel_gvt_debugfs_clean(gvt);
@@ -363,12 +248,6 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
if (ret)
goto out_clean_thread;
- ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret) {
- gvt_err("failed to init vgpu type groups: %d\n", ret);
- goto out_clean_types;
- }
-
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
@@ -454,7 +333,8 @@ EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
void
intel_gvt_unregister_hypervisor(void)
{
- intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+ void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+ intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 88ab360fcb31..0c0615602343 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -574,9 +574,6 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
- struct intel_vgpu_type *(*gvt_find_vgpu_type)(
- struct intel_gvt *gvt, unsigned int type_group_id);
- bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index dda320749c65..98eb48c24c46 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -220,7 +220,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
{
u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
- if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
+ if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
else if (!ips)
@@ -286,7 +286,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
+ if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -1174,7 +1174,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
+ if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@@ -3342,9 +3342,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
- MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
- MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
- MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
+ MMIO_D(DMC_SSP_BASE, D_SKL_PLUS);
+ MMIO_D(DMC_HTP_SKL, D_SKL_PLUS);
+ MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS);
MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
@@ -3655,7 +3655,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
* otherwise, need to update cmd_reg_handler in cmd_parser.c
*/
static struct gvt_mmio_block mmio_blocks[] = {
- {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+ {D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index b79da5124f83..f33e3cbd0439 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -49,7 +49,7 @@ enum hypervisor_type {
struct intel_gvt_mpt {
enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
- void (*host_exit)(struct device *dev);
+ void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 497d28ce47df..614b951d919f 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -585,7 +585,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (INTEL_GEN(gvt->gt->i915) >= 9) {
+ } else if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 65ff43cfc0f7..48b4d4cf805d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -144,6 +144,104 @@ static inline bool handle_valid(unsigned long handle)
return !!(handle & ~0xff);
}
+static ssize_t available_instances_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ struct intel_vgpu_type *type;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+ &mdev_type_attr_available_instances.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (!group)
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = gvt_type_attrs;
+ gvt_vgpu_type_groups[i] = group;
+ }
+
+ return 0;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = gvt_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return -ENOMEM;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = gvt_vgpu_type_groups[i];
+ gvt_vgpu_type_groups[i] = NULL;
+ kfree(group);
+ }
+}
+
static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
@@ -694,14 +792,13 @@ static int intel_vgpu_create(struct mdev_device *mdev)
struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
struct device *pdev;
- void *gvt;
+ struct intel_gvt *gvt;
int ret;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
- type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
- mdev_get_type_group_id(mdev));
+ type = &gvt->types[mdev_get_type_group_id(mdev)];
if (!type) {
ret = -EINVAL;
goto out;
@@ -1667,19 +1764,26 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- struct attribute_group **kvm_vgpu_type_groups;
+ int ret;
+
+ ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
+ if (ret)
+ return ret;
intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
- return -EFAULT;
- intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
+ intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
- return mdev_register_device(dev, &intel_vgpu_ops);
+ ret = mdev_register_device(dev, &intel_vgpu_ops);
+ if (ret)
+ intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
+
+ return ret;
}
-static void kvmgt_host_exit(struct device *dev)
+static void kvmgt_host_exit(struct device *dev, void *gvt)
{
mdev_unregister_device(dev);
+ intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
}
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c9589e26af93..b8ac80765461 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -373,7 +373,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
*/
fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9)
+ if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw);
@@ -409,7 +409,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
return;
- if (engine->id == RCS0 && IS_GEN(engine->i915, 9))
+ if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9)
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -474,7 +474,7 @@ static void switch_mmio(struct intel_vgpu *pre,
struct engine_mmio *mmio;
u32 old_v, new_v;
- if (INTEL_GEN(engine->i915) >= 9)
+ if (GRAPHICS_VER(engine->i915) >= 9)
switch_mocs(pre, next, engine);
for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
@@ -486,7 +486,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* state image on gen9, it's initialized by lri command and
* save or restore with context together.
*/
- if (IS_GEN(engine->i915, 9) && mmio->in_context)
+ if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context)
continue;
// save
@@ -580,7 +580,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->gt->i915) >= 9) {
+ if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 550a456e936f..e6c5a792a49a 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -63,13 +63,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
- intel_gvt_host.mpt->host_exit(dev);
+ intel_gvt_host.mpt->host_exit(dev, gvt);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index fc735692f21f..734c37c5e347 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -364,7 +364,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
u32 *cs;
int err;
- if (IS_GEN(req->engine->i915, 9) && is_inhibit_context(req->context))
+ if (GRAPHICS_VER(req->engine->i915) == 9 && is_inhibit_context(req->context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/*
@@ -1148,7 +1148,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
static int workload_thread(void *arg)
{
struct intel_engine_cs *engine = arg;
- const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+ const bool need_force_wake = GRAPHICS_VER(engine->i915) >= 9;
struct intel_gvt *gvt = engine->i915->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 9039787f123a..fa6b92615799 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -149,10 +149,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN(gvt->gt->i915, 8))
+ if (GRAPHICS_VER(gvt->gt->i915) == 8)
sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name);
- else if (IS_GEN(gvt->gt->i915, 9))
+ else if (GRAPHICS_VER(gvt->gt->i915) == 9)
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index aa573b078ae7..b1aa1c482c32 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -343,18 +343,15 @@ out:
void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
+ unsigned long flags,
struct lock_class_key *mkey,
struct lock_class_key *wkey)
{
- unsigned long bits;
-
debug_active_init(ref);
- ref->flags = 0;
+ ref->flags = flags;
ref->active = active;
- ref->retire = ptr_unpack_bits(retire, &bits, 2);
- if (bits & I915_ACTIVE_MAY_SLEEP)
- ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+ ref->retire = retire;
spin_lock_init(&ref->tree_lock);
ref->tree = RB_ROOT;
@@ -1156,8 +1153,7 @@ static int auto_active(struct i915_active *ref)
return 0;
}
-__i915_active_call static void
-auto_retire(struct i915_active *ref)
+static void auto_retire(struct i915_active *ref)
{
i915_active_put(ref);
}
@@ -1171,7 +1167,7 @@ struct i915_active *i915_active_create(void)
return NULL;
kref_init(&aa->ref);
- i915_active_init(&aa->base, auto_active, auto_retire);
+ i915_active_init(&aa->base, auto_active, auto_retire, 0);
return &aa->base;
}
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index fb165d3f01cf..d0feda68b874 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -152,15 +152,16 @@ i915_active_fence_isset(const struct i915_active_fence *active)
void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
+ unsigned long flags,
struct lock_class_key *mkey,
struct lock_class_key *wkey);
/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
-#define i915_active_init(ref, active, retire) do { \
- static struct lock_class_key __mkey; \
- static struct lock_class_key __wkey; \
- \
- __i915_active_init(ref, active, retire, &__mkey, &__wkey); \
+#define i915_active_init(ref, active, retire, flags) do { \
+ static struct lock_class_key __mkey; \
+ static struct lock_class_key __wkey; \
+ \
+ __i915_active_init(ref, active, retire, flags, &__mkey, &__wkey); \
} while (0)
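With flags now an explicit argument, a call site that used to smuggle the may-sleep hint through __i915_active_call and pointer packing passes I915_ACTIVE_RETIRE_SLEEPS directly. A hypothetical before/after, with my_active/my_retire as invented callback names:

/* Before: the hint rode along in the low bits of the retire pointer. */
/* i915_active_init(&obj->active, my_active,
 *		    i915_active_may_sleep(my_retire)); */

/* After: an ordinary flags word; pass 0 when retire never sleeps. */
i915_active_init(&obj->active, my_active, my_retire,
		 I915_ACTIVE_RETIRE_SLEEPS);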
struct dma_fence *
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 6360c3e4b765..c149f348a972 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -24,11 +24,6 @@ struct i915_active_fence {
struct active_node;
-#define I915_ACTIVE_MAY_SLEEP BIT(0)
-
-#define __i915_active_call __aligned(4)
-#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
-
struct i915_active {
atomic_t count;
struct mutex mutex;
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
deleted file mode 100644
index 3a2f6eecb2fc..000000000000
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ /dev/null
@@ -1,435 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/kmemleak.h>
-#include <linux/slab.h>
-
-#include "i915_buddy.h"
-
-#include "i915_gem.h"
-#include "i915_globals.h"
-#include "i915_utils.h"
-
-static struct i915_global_block {
- struct i915_global base;
- struct kmem_cache *slab_blocks;
-} global;
-
-static void i915_global_buddy_shrink(void)
-{
- kmem_cache_shrink(global.slab_blocks);
-}
-
-static void i915_global_buddy_exit(void)
-{
- kmem_cache_destroy(global.slab_blocks);
-}
-
-static struct i915_global_block global = { {
- .shrink = i915_global_buddy_shrink,
- .exit = i915_global_buddy_exit,
-} };
-
-int __init i915_global_buddy_init(void)
-{
- global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
- if (!global.slab_blocks)
- return -ENOMEM;
-
- i915_global_register(&global.base);
- return 0;
-}
-
-static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
- unsigned int order,
- u64 offset)
-{
- struct i915_buddy_block *block;
-
- GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
-
- block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
- if (!block)
- return NULL;
-
- block->header = offset;
- block->header |= order;
- block->parent = parent;
-
- GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
- return block;
-}
-
-static void i915_block_free(struct i915_buddy_block *block)
-{
- kmem_cache_free(global.slab_blocks, block);
-}
-
-static void mark_allocated(struct i915_buddy_block *block)
-{
- block->header &= ~I915_BUDDY_HEADER_STATE;
- block->header |= I915_BUDDY_ALLOCATED;
-
- list_del(&block->link);
-}
-
-static void mark_free(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- block->header &= ~I915_BUDDY_HEADER_STATE;
- block->header |= I915_BUDDY_FREE;
-
- list_add(&block->link,
- &mm->free_list[i915_buddy_block_order(block)]);
-}
-
-static void mark_split(struct i915_buddy_block *block)
-{
- block->header &= ~I915_BUDDY_HEADER_STATE;
- block->header |= I915_BUDDY_SPLIT;
-
- list_del(&block->link);
-}
-
-int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
-{
- unsigned int i;
- u64 offset;
-
- if (size < chunk_size)
- return -EINVAL;
-
- if (chunk_size < PAGE_SIZE)
- return -EINVAL;
-
- if (!is_power_of_2(chunk_size))
- return -EINVAL;
-
- size = round_down(size, chunk_size);
-
- mm->size = size;
- mm->chunk_size = chunk_size;
- mm->max_order = ilog2(size) - ilog2(chunk_size);
-
- GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
-
- mm->free_list = kmalloc_array(mm->max_order + 1,
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!mm->free_list)
- return -ENOMEM;
-
- for (i = 0; i <= mm->max_order; ++i)
- INIT_LIST_HEAD(&mm->free_list[i]);
-
- mm->n_roots = hweight64(size);
-
- mm->roots = kmalloc_array(mm->n_roots,
- sizeof(struct i915_buddy_block *),
- GFP_KERNEL);
- if (!mm->roots)
- goto out_free_list;
-
- offset = 0;
- i = 0;
-
- /*
- * Split into power-of-two blocks, in case we are given a size that is
- * not itself a power-of-two.
- */
- do {
- struct i915_buddy_block *root;
- unsigned int order;
- u64 root_size;
-
- root_size = rounddown_pow_of_two(size);
- order = ilog2(root_size) - ilog2(chunk_size);
-
- root = i915_block_alloc(NULL, order, offset);
- if (!root)
- goto out_free_roots;
-
- mark_free(mm, root);
-
- GEM_BUG_ON(i > mm->max_order);
- GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
-
- mm->roots[i] = root;
-
- offset += root_size;
- size -= root_size;
- i++;
- } while (size);
-
- return 0;
-
-out_free_roots:
- while (i--)
- i915_block_free(mm->roots[i]);
- kfree(mm->roots);
-out_free_list:
- kfree(mm->free_list);
- return -ENOMEM;
-}
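The do/while loop above peels off the largest power-of-two prefix on each pass, which is why mm->n_roots is hweight64(size): every set bit of size yields exactly one root. A standalone worked example, assuming an invented 12 MiB size with 4 KiB chunks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 12ull << 20;	/* 12 MiB: not a power of two */
	uint64_t chunk = 4096;		/* 4 KiB chunk_size */
	uint64_t offset = 0;

	while (size) {
		/* rounddown_pow_of_two(): highest set bit of size */
		uint64_t root = 1ull << (63 - __builtin_clzll(size));
		/* ilog2(root) - ilog2(chunk), both powers of two */
		unsigned int order = __builtin_ctzll(root / chunk);

		printf("root @ 0x%llx, order %u (%llu KiB)\n",
		       (unsigned long long)offset, order,
		       (unsigned long long)(root >> 10));
		offset += root;
		size -= root;
	}
	return 0;	/* an 8 MiB order-11 root, then a 4 MiB order-10 one */
}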
-
-void i915_buddy_fini(struct i915_buddy_mm *mm)
-{
- int i;
-
- for (i = 0; i < mm->n_roots; ++i) {
- GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
- i915_block_free(mm->roots[i]);
- }
-
- kfree(mm->roots);
- kfree(mm->free_list);
-}
-
-static int split_block(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- unsigned int block_order = i915_buddy_block_order(block) - 1;
- u64 offset = i915_buddy_block_offset(block);
-
- GEM_BUG_ON(!i915_buddy_block_is_free(block));
- GEM_BUG_ON(!i915_buddy_block_order(block));
-
- block->left = i915_block_alloc(block, block_order, offset);
- if (!block->left)
- return -ENOMEM;
-
- block->right = i915_block_alloc(block, block_order,
- offset + (mm->chunk_size << block_order));
- if (!block->right) {
- i915_block_free(block->left);
- return -ENOMEM;
- }
-
- mark_free(mm, block->left);
- mark_free(mm, block->right);
-
- mark_split(block);
-
- return 0;
-}
-
-static struct i915_buddy_block *
-get_buddy(struct i915_buddy_block *block)
-{
- struct i915_buddy_block *parent;
-
- parent = block->parent;
- if (!parent)
- return NULL;
-
- if (parent->left == block)
- return parent->right;
-
- return parent->left;
-}
-
-static void __i915_buddy_free(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- struct i915_buddy_block *parent;
-
- while ((parent = block->parent)) {
- struct i915_buddy_block *buddy;
-
- buddy = get_buddy(block);
-
- if (!i915_buddy_block_is_free(buddy))
- break;
-
- list_del(&buddy->link);
-
- i915_block_free(block);
- i915_block_free(buddy);
-
- block = parent;
- }
-
- mark_free(mm, block);
-}
-
-void i915_buddy_free(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
- __i915_buddy_free(mm, block);
-}
-
-void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
-{
- struct i915_buddy_block *block, *on;
-
- list_for_each_entry_safe(block, on, objects, link) {
- i915_buddy_free(mm, block);
- cond_resched();
- }
- INIT_LIST_HEAD(objects);
-}
-
-/*
- * Allocate power-of-two block. The order value here translates to:
- *
- * 0 = 2^0 * mm->chunk_size
- * 1 = 2^1 * mm->chunk_size
- * 2 = 2^2 * mm->chunk_size
- * ...
- */
-struct i915_buddy_block *
-i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
-{
- struct i915_buddy_block *block = NULL;
- unsigned int i;
- int err;
-
- for (i = order; i <= mm->max_order; ++i) {
- block = list_first_entry_or_null(&mm->free_list[i],
- struct i915_buddy_block,
- link);
- if (block)
- break;
- }
-
- if (!block)
- return ERR_PTR(-ENOSPC);
-
- GEM_BUG_ON(!i915_buddy_block_is_free(block));
-
- while (i != order) {
- err = split_block(mm, block);
- if (unlikely(err))
- goto out_free;
-
- /* Go low */
- block = block->left;
- i--;
- }
-
- mark_allocated(block);
- kmemleak_update_trace(block);
- return block;
-
-out_free:
- if (i != order)
- __i915_buddy_free(mm, block);
- return ERR_PTR(err);
-}
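A hedged usage sketch of the allocator above, assuming a mm initialised with 4 KiB chunks: order 9 asks for 2^9 * 4 KiB = 2 MiB, and the while loop splits a larger free block down to that order when no order-9 block is on the free list.

struct i915_buddy_block *block;

block = i915_buddy_alloc(&mm, 9);	/* 2^9 * chunk_size = 2 MiB */
if (IS_ERR(block))
	return PTR_ERR(block);		/* -ENOSPC when nothing fits */

/* i915_buddy_block_size(&mm, block) == SZ_2M here. */
i915_buddy_free(&mm, block);		/* may merge back with its buddy */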
-
-static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
-{
- return s1 <= e2 && e1 >= s2;
-}
-
-static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
-{
- return s1 <= s2 && e1 >= e2;
-}
-
-/*
- * Allocate range. Note that it's safe to chain together multiple alloc_ranges
- * with the same blocks list.
- *
- * Intended for pre-allocating portions of the address space, for example to
- * reserve a block for the initial framebuffer or similar, hence the expectation
- * here is that i915_buddy_alloc() is still the main vehicle for
- * allocations; if that's not the case, the drm_mm range allocator is
- * probably a much better fit, and you should probably use that instead.
- */
-int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
- struct list_head *blocks,
- u64 start, u64 size)
-{
- struct i915_buddy_block *block;
- struct i915_buddy_block *buddy;
- LIST_HEAD(allocated);
- LIST_HEAD(dfs);
- u64 end;
- int err;
- int i;
-
- if (size < mm->chunk_size)
- return -EINVAL;
-
- if (!IS_ALIGNED(size | start, mm->chunk_size))
- return -EINVAL;
-
- if (range_overflows(start, size, mm->size))
- return -EINVAL;
-
- for (i = 0; i < mm->n_roots; ++i)
- list_add_tail(&mm->roots[i]->tmp_link, &dfs);
-
- end = start + size - 1;
-
- do {
- u64 block_start;
- u64 block_end;
-
- block = list_first_entry_or_null(&dfs,
- struct i915_buddy_block,
- tmp_link);
- if (!block)
- break;
-
- list_del(&block->tmp_link);
-
- block_start = i915_buddy_block_offset(block);
- block_end = block_start + i915_buddy_block_size(mm, block) - 1;
-
- if (!overlaps(start, end, block_start, block_end))
- continue;
-
- if (i915_buddy_block_is_allocated(block)) {
- err = -ENOSPC;
- goto err_free;
- }
-
- if (contains(start, end, block_start, block_end)) {
- if (!i915_buddy_block_is_free(block)) {
- err = -ENOSPC;
- goto err_free;
- }
-
- mark_allocated(block);
- list_add_tail(&block->link, &allocated);
- continue;
- }
-
- if (!i915_buddy_block_is_split(block)) {
- err = split_block(mm, block);
- if (unlikely(err))
- goto err_undo;
- }
-
- list_add(&block->right->tmp_link, &dfs);
- list_add(&block->left->tmp_link, &dfs);
- } while (1);
-
- list_splice_tail(&allocated, blocks);
- return 0;
-
-err_undo:
- /*
- * We really don't want to leave around a bunch of split blocks, since
- * bigger is better, so make sure we merge everything back before we
- * free the allocated blocks.
- */
- buddy = get_buddy(block);
- if (buddy &&
- (i915_buddy_block_is_free(block) &&
- i915_buddy_block_is_free(buddy)))
- __i915_buddy_free(mm, block);
-
-err_free:
- i915_buddy_free_list(mm, &allocated);
- return err;
-}
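A sketch of the pre-allocation use the comment above has in mind: reserving a framebuffer range handed over by firmware at init time. fb_start and fb_size are invented placeholders.

LIST_HEAD(reserved);
int err;

err = i915_buddy_alloc_range(&mm, &reserved, fb_start, fb_size);
if (err)		/* -ENOSPC if any block in the range is taken */
	return err;

/* ... the range stays pinned until it is handed back ... */
i915_buddy_free_list(&mm, &reserved);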
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/i915_buddy.c"
-#endif
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
deleted file mode 100644
index 9ce5200f4001..000000000000
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __I915_BUDDY_H__
-#define __I915_BUDDY_H__
-
-#include <linux/bitops.h>
-#include <linux/list.h>
-
-struct i915_buddy_block {
-#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
-#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
-#define I915_BUDDY_ALLOCATED (1 << 10)
-#define I915_BUDDY_FREE (2 << 10)
-#define I915_BUDDY_SPLIT (3 << 10)
-/* Free to be used, if needed in the future */
-#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
-#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
- u64 header;
-
- struct i915_buddy_block *left;
- struct i915_buddy_block *right;
- struct i915_buddy_block *parent;
-
- void *private; /* owned by creator */
-
- /*
- * While the block is allocated by the user through i915_buddy_alloc*,
- * the user has ownership of the link, for example to maintain within
- * a list, if so desired. As soon as the block is freed with
- * i915_buddy_free* ownership is given back to the mm.
- */
- struct list_head link;
- struct list_head tmp_link;
-};
-
-/* Order-zero must be at least PAGE_SIZE */
-#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
-
-/*
- * Binary Buddy System.
- *
- * Locking should be handled by the user, a simple mutex around
- * i915_buddy_alloc* and i915_buddy_free* should suffice.
- */
-struct i915_buddy_mm {
- /* Maintain a free list for each order. */
- struct list_head *free_list;
-
- /*
- * Maintain explicit binary tree(s) to track the allocation of the
- * address space. This gives us a simple way of finding a buddy block
- * and performing the potentially recursive merge step when freeing a
- * block. Nodes are either allocated or free, in which case they will
- * also exist on the respective free list.
- */
- struct i915_buddy_block **roots;
-
- /*
- * Anything from here is public, and remains static for the lifetime of
- * the mm. Everything above is considered do-not-touch.
- */
- unsigned int n_roots;
- unsigned int max_order;
-
- /* Must be at least PAGE_SIZE */
- u64 chunk_size;
- u64 size;
-};
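The "Locking should be handled by the user" note above translates to a caller-side wrapper along these lines; the struct and mutex are illustrative, not part of the API:

struct locked_buddy {
	struct mutex lock;
	struct i915_buddy_mm mm;
};

static struct i915_buddy_block *
locked_buddy_alloc(struct locked_buddy *lb, unsigned int order)
{
	struct i915_buddy_block *block;

	mutex_lock(&lb->lock);
	block = i915_buddy_alloc(&lb->mm, order);
	mutex_unlock(&lb->lock);
	return block;
}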
-
-static inline u64
-i915_buddy_block_offset(struct i915_buddy_block *block)
-{
- return block->header & I915_BUDDY_HEADER_OFFSET;
-}
-
-static inline unsigned int
-i915_buddy_block_order(struct i915_buddy_block *block)
-{
- return block->header & I915_BUDDY_HEADER_ORDER;
-}
-
-static inline unsigned int
-i915_buddy_block_state(struct i915_buddy_block *block)
-{
- return block->header & I915_BUDDY_HEADER_STATE;
-}
-
-static inline bool
-i915_buddy_block_is_allocated(struct i915_buddy_block *block)
-{
- return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
-}
-
-static inline bool
-i915_buddy_block_is_free(struct i915_buddy_block *block)
-{
- return i915_buddy_block_state(block) == I915_BUDDY_FREE;
-}
-
-static inline bool
-i915_buddy_block_is_split(struct i915_buddy_block *block)
-{
- return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
-}
-
-static inline u64
-i915_buddy_block_size(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- return mm->chunk_size << i915_buddy_block_order(block);
-}
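All of the accessors above decode a single u64: offset in bits 63:12, state in 11:10, order in 5:0. A worked example, assuming a free 2 MiB-aligned block of order 9:

u64 header = 0x200000ull | I915_BUDDY_FREE | 9;

/*
 * i915_buddy_block_offset()	-> 0x200000
 * i915_buddy_block_state()	-> I915_BUDDY_FREE (2 << 10)
 * i915_buddy_block_order()	-> 9
 * i915_buddy_block_size()	-> chunk_size << 9 (2 MiB with 4K chunks)
 */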
-
-int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
-
-void i915_buddy_fini(struct i915_buddy_mm *mm);
-
-struct i915_buddy_block *
-i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
-
-int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
- struct list_head *blocks,
- u64 start, u64 size);
-
-void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
-
-void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index e6f1e93abbbb..3992c25a191d 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -946,8 +946,8 @@ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
- engine->class == COPY_ENGINE_CLASS))
+ if (GRAPHICS_VER(engine->i915) != 7 && !(GRAPHICS_VER(engine->i915) == 9 &&
+ engine->class == COPY_ENGINE_CLASS))
return 0;
switch (engine->class) {
@@ -977,7 +977,7 @@ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
break;
case COPY_ENGINE_CLASS:
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
- if (IS_GEN(engine->i915, 9)) {
+ if (GRAPHICS_VER(engine->i915) == 9) {
cmd_tables = gen9_blt_cmd_table;
cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
engine->get_cmd_length_mask =
@@ -993,7 +993,7 @@ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
}
- if (IS_GEN(engine->i915, 9)) {
+ if (GRAPHICS_VER(engine->i915) == 9) {
engine->reg_tables = gen9_blt_reg_tables;
engine->reg_table_count =
ARRAY_SIZE(gen9_blt_reg_tables);
@@ -1369,6 +1369,20 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
return 0;
}
+/**
+ * intel_engine_cmd_parser_alloc_jump_whitelist() - preallocate jump whitelist for intel_engine_cmd_parser()
+ * @batch_length: length of the commands in batch_obj
+ * @trampoline: Whether jump trampolines are used.
+ *
+ * Preallocates a jump whitelist for parsing the cmd buffer in intel_engine_cmd_parser().
+ * This has to be preallocated, because the command parser runs in signaling context,
+ * and may not allocate any memory.
+ *
+ * Return: NULL or pointer to a jump whitelist, or ERR_PTR() on failure. Use
+ * IS_ERR() to check for errors. Must be freed with kfree().
+ *
+ * NULL is a valid value, meaning no allocation was required.
+ */
unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
bool trampoline)
{
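The kernel-doc above implies a three-step call sequence: allocate the whitelist where allocation is still legal, hand it to the parser, then free it. A hedged sketch; the argument names follow the doc, and the exact parser signature is assumed:

unsigned long *jump_whitelist;
int err;

jump_whitelist = intel_engine_cmd_parser_alloc_jump_whitelist(batch_length,
							       trampoline);
if (IS_ERR(jump_whitelist))
	return PTR_ERR(jump_whitelist);

/* ... possibly from the signalling context, where allocating is unsafe ... */
err = intel_engine_cmd_parser(engine, batch, batch_offset, batch_length,
			      shadow, jump_whitelist, shadow_map, batch_map);

kfree(jump_whitelist);	/* NULL is fine: no allocation was required */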
@@ -1401,7 +1415,9 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
* @batch_offset: byte offset in the batch at which execution starts
* @batch_length: length of the commands in batch_obj
* @shadow: validated copy of the batch buffer in question
- * @trampoline: whether to emit a conditional trampoline at the end of the batch
+ * @jump_whitelist: buffer preallocated with intel_engine_cmd_parser_alloc_jump_whitelist()
+ * @shadow_map: mapping to @shadow vma
+ * @batch_map: mapping to @batch vma
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1521,7 +1537,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
if (IS_HASWELL(engine->i915))
flags = MI_BATCH_NON_SECURE_HSW;
- GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
+ GEM_BUG_ON(!IS_GRAPHICS_VER(engine->i915, 6, 7));
__gen6_emit_bb_start(batch_end,
batch_addr,
flags);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b654b7498bcd..cc745751ac53 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -124,6 +124,17 @@ stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
}
}
+static const char *stringify_vma_type(const struct i915_vma *vma)
+{
+ if (i915_vma_is_ggtt(vma))
+ return "ggtt";
+
+ if (i915_vma_is_dpt(vma))
+ return "dpt";
+
+ return "ppgtt";
+}
+
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -156,11 +167,11 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (i915_vma_is_pinned(vma))
pin_count++;
- seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
- i915_vma_is_ggtt(vma) ? "g" : "pp",
+ seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
+ stringify_vma_type(vma),
vma->node.start, vma->node.size,
stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
- if (i915_vma_is_ggtt(vma)) {
+ if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
switch (vma->ggtt_view.type) {
case I915_GGTT_VIEW_NORMAL:
seq_puts(m, ", normal");
@@ -350,7 +361,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- if (IS_GEN(dev_priv, 5)) {
+ if (GRAPHICS_VER(dev_priv) == 5) {
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
@@ -397,7 +408,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ } else if (GRAPHICS_VER(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
u32 rp_state_cap;
@@ -421,7 +432,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (GRAPHICS_VER(dev_priv) >= 9)
reqf >>= 23;
else {
reqf &= ~GEN6_TURBO_DISABLE;
@@ -447,7 +458,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (GRAPHICS_VER(dev_priv) >= 11) {
pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
/*
@@ -456,7 +467,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
*/
pm_isr = 0;
pm_iir = 0;
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (GRAPHICS_VER(dev_priv) >= 8) {
pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
@@ -479,14 +490,14 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
- if (INTEL_GEN(dev_priv) <= 10)
+ if (GRAPHICS_VER(dev_priv) <= 10)
seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
pm_isr, pm_iir);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+ (gt_perf_status & (GRAPHICS_VER(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -527,20 +538,20 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+ GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -611,20 +622,20 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_puts(m, "L-shaped memory detected\n");
/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
- if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
+ if (GRAPHICS_VER(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
return 0;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- if (IS_GEN_RANGE(dev_priv, 3, 4)) {
+ if (IS_GRAPHICS_VER(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
intel_uncore_read(uncore, DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
intel_uncore_read(uncore, DCC2));
seq_printf(m, "C0DRB3 = 0x%04x\n",
- intel_uncore_read16(uncore, C0DRB3));
+ intel_uncore_read16(uncore, C0DRB3_BW));
seq_printf(m, "C1DRB3 = 0x%04x\n",
- intel_uncore_read16(uncore, C1DRB3));
+ intel_uncore_read16(uncore, C1DRB3_BW));
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ } else if (GRAPHICS_VER(dev_priv) >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
intel_uncore_read(uncore, MAD_DIMM_C0));
@@ -634,7 +645,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
intel_uncore_read(uncore, MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
intel_uncore_read(uncore, TILECTL));
- if (INTEL_GEN(dev_priv) >= 8)
+ if (GRAPHICS_VER(dev_priv) >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
intel_uncore_read(uncore, GAMTARBMODE));
else
@@ -945,7 +956,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
atomic_inc(&gt->user_wakeref);
intel_gt_pm_get(gt);
- if (INTEL_GEN(i915) >= 6)
+ if (GRAPHICS_VER(i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
return 0;
@@ -956,7 +967,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
struct drm_i915_private *i915 = inode->i_private;
struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) >= 6)
+ if (GRAPHICS_VER(i915) >= 6)
intel_uncore_forcewake_user_put(&i915->uncore);
intel_gt_pm_put(gt);
atomic_dec(&gt->user_wakeref);
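The whole debugfs file follows one mechanical substitution: the version now comes from GRAPHICS_VER() instead of INTEL_GEN(), IS_GEN() equality checks become direct comparisons, and IS_GEN_RANGE() becomes IS_GRAPHICS_VER(). A hedged before/after fragment summarising the rewrite rule (do_something() is a stand-in, not code from the patch):

    /* before */
    if (INTEL_GEN(i915) >= 6 || IS_GEN(i915, 4) || IS_GEN_RANGE(i915, 3, 5))
            do_something();

    /* after */
    if (GRAPHICS_VER(i915) >= 6 || GRAPHICS_VER(i915) == 4 ||
        IS_GRAPHICS_VER(i915, 3, 5))
            do_something();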
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c2329bc44f55..850b499c71c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -39,6 +39,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
@@ -48,7 +49,7 @@
#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
-#include "display/intel_csr.h"
+#include "display/intel_dmc.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
@@ -83,6 +84,7 @@
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
+#include "intel_region_ttm.h"
#include "intel_sideband.h"
#include "vlv_suspend.h"
@@ -105,12 +107,12 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
- int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (INTEL_GEN(dev_priv) >= 4)
+ if (GRAPHICS_VER(dev_priv) >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -137,7 +139,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
return ret;
}
- if (INTEL_GEN(dev_priv) >= 4)
+ if (GRAPHICS_VER(dev_priv) >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -150,7 +152,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
- int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -189,7 +191,7 @@ intel_setup_mchbar(struct drm_i915_private *dev_priv)
static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
- int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
@@ -334,6 +336,10 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
goto err_workqueues;
+ ret = intel_region_ttm_device_init(dev_priv);
+ if (ret)
+ goto err_ttm;
+
intel_wopcm_init_early(&dev_priv->wopcm);
intel_gt_init_early(&dev_priv->gt, dev_priv);
@@ -358,6 +364,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
+ intel_region_ttm_device_fini(dev_priv);
+err_ttm:
vlv_suspend_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
@@ -375,6 +383,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
+ intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
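Note how the early-probe error path unwinds in exact reverse order of setup: the new intel_region_ttm_device_init() gets a matching err_ttm label, and the same fini call is mirrored in i915_driver_late_release(). A minimal sketch of the convention, with hypothetical step_a/step_b helpers:

    static int my_early_probe(struct my_device *dev)
    {
            int ret;

            ret = step_a_init(dev);
            if (ret)
                    return ret;

            ret = step_b_init(dev);         /* e.g. the new TTM device init */
            if (ret)
                    goto err_step_a;

            return 0;

    err_step_a:
            step_a_fini(dev);               /* tear down in reverse order */
            return ret;
    }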
@@ -474,7 +483,7 @@ static int i915_set_dma_info(struct drm_i915_private *i915)
goto mask_err;
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN(i915, 2))
+ if (GRAPHICS_VER(i915) == 2)
mask_size = 30;
/*
@@ -553,7 +562,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_perf;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
if (ret)
goto err_ggtt;
@@ -600,7 +609,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* device. The kernel then disables that interrupt source and so
* prevents the other device from working properly.
*/
- if (INTEL_GEN(dev_priv) >= 5) {
+ if (GRAPHICS_VER(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
drm_dbg(&dev_priv->drm, "can't enable MSI");
}
@@ -630,6 +639,8 @@ err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
i915_ggtt_driver_release(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
+ i915_ggtt_driver_late_release(dev_priv);
err_perf:
i915_perf_fini(dev_priv);
return ret;
@@ -728,7 +739,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
intel_platform_name(INTEL_INFO(dev_priv)->platform),
intel_subplatform(RUNTIME_INFO(dev_priv),
INTEL_INFO(dev_priv)->platform),
- INTEL_GEN(dev_priv));
+ GRAPHICS_VER(dev_priv));
intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
@@ -757,7 +768,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(i915))
return i915;
- i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
/* Device parameters start as a copy of module parameters. */
@@ -768,8 +778,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(device_info, match_info, sizeof(*device_info));
RUNTIME_INFO(i915)->device_id = pdev->device;
- BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
-
return i915;
}
@@ -796,7 +804,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
- if (!i915->params.nuclear_pageflip && match_info->gen < 5)
+ if (!i915->params.nuclear_pageflip && match_info->graphics_ver < 5)
i915->drm.driver_features &= ~DRIVER_ATOMIC;
/*
@@ -805,7 +813,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
- if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
+ if (GRAPHICS_VER(i915) >= 9 && i915_selftest.live < 0 &&
i915->params.fake_lmem_start) {
mkwrite_device_info(i915)->memory_regions =
REGION_SMEM | REGION_LMEM | REGION_STOLEN_SMEM;
@@ -882,6 +890,8 @@ out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
i915_ggtt_driver_release(i915);
+ i915_gem_drain_freed_objects(i915);
+ i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
i915_driver_mmio_release(i915);
out_runtime_pm_put:
@@ -938,6 +948,7 @@ static void i915_driver_release(struct drm_device *dev)
intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
+ i915_ggtt_driver_late_release(dev_priv);
i915_driver_mmio_release(dev_priv);
@@ -973,8 +984,12 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
static void i915_driver_lastclose(struct drm_device *dev)
{
+ struct drm_i915_private *i915 = to_i915(dev);
+
intel_fbdev_restore_mode(dev);
- vga_switcheroo_process_delayed_switch();
+
+ if (HAS_DISPLAY(i915))
+ vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -994,6 +1009,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
drm_modeset_lock_all(dev);
for_each_intel_encoder(dev, encoder)
if (encoder->suspend)
@@ -1006,6 +1024,9 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
drm_modeset_lock_all(dev);
for_each_intel_encoder(dev, encoder)
if (encoder->shutdown)
@@ -1021,9 +1042,11 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
i915_gem_suspend(i915);
- drm_kms_helper_poll_disable(&i915->drm);
+ if (HAS_DISPLAY(i915)) {
+ drm_kms_helper_poll_disable(&i915->drm);
- drm_atomic_helper_shutdown(&i915->drm);
+ drm_atomic_helper_shutdown(&i915->drm);
+ }
intel_dp_mst_suspend(i915);
@@ -1033,10 +1056,18 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
+ intel_dmc_ucode_suspend(i915);
+
/*
* The only requirement is to reboot with display DC states disabled,
* for now leaving all display power wells in the INIT power domain
- * enabled matching the driver reload sequence.
+ * enabled.
+ *
+ * TODO:
+ * - unify the pci_driver::shutdown sequence here with the
+ * pci_driver.driver.pm.poweroff,poweroff_late sequence.
+ * - unify the driver remove and system/runtime suspend sequences with
+ * the above unified shutdown/poweroff sequence.
*/
intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -1079,8 +1110,8 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(dev_priv);
-
- drm_kms_helper_poll_disable(dev);
+ if (HAS_DISPLAY(dev_priv))
+ drm_kms_helper_poll_disable(dev);
pci_save_state(pdev);
@@ -1106,7 +1137,7 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_csr_ucode_suspend(dev_priv);
+ intel_dmc_ucode_suspend(dev_priv);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1164,7 +1195,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
- if (!(hibernation && INTEL_GEN(dev_priv) < 6))
+ if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
pci_set_power_state(pdev, PCI_D3hot);
out:
@@ -1208,7 +1239,7 @@ static int i915_drm_resume(struct drm_device *dev)
i915_ggtt_resume(&dev_priv->ggtt);
- intel_csr_ucode_resume(dev_priv);
+ intel_dmc_ucode_resume(dev_priv);
i915_restore_display(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
@@ -1227,7 +1258,8 @@ static int i915_drm_resume(struct drm_device *dev)
*/
intel_runtime_pm_enable_interrupts(dev_priv);
- drm_mode_config_reset(dev);
+ if (HAS_DISPLAY(dev_priv))
+ drm_mode_config_reset(dev);
i915_gem_resume(dev_priv);
@@ -1240,7 +1272,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_resume(dev);
intel_hpd_poll_disable(dev_priv);
- drm_kms_helper_poll_enable(dev);
+ if (HAS_DISPLAY(dev_priv))
+ drm_kms_helper_poll_enable(dev);
intel_opregion_resume(dev_priv);
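The suspend/resume and shutdown paths above all gain the same HAS_DISPLAY() guard so that headless parts (pipe_mask == 0) skip the KMS helpers entirely. The guard shape, sketched against one real helper:

    /* Sketch only: skip display-only work when the device has no pipes. */
    if (HAS_DISPLAY(i915))
            drm_kms_helper_poll_disable(&i915->drm);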
@@ -1707,6 +1740,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
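I915_GEM_CREATE_EXT is the extensible variant of GEM_CREATE introduced by this series. A hedged userspace sketch, assuming the drm_i915_gem_create_ext uapi struct added alongside the ioctl:

    struct drm_i915_gem_create_ext create = {
            .size = 4096,           /* one page */
            .extensions = 0,        /* no extension chain: behaves like GEM_CREATE */
    };

    if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) == 0)
            use_handle(create.handle);      /* use_handle() is illustrative */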
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9ec9277539ec..38ff2fb89744 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -51,7 +51,6 @@
#include <linux/xarray.h>
#include <drm/intel-gtt.h>
-#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
@@ -60,6 +59,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
+#include <drm/ttm/ttm_device.h>
#include "i915_params.h"
#include "i915_reg.h"
@@ -68,6 +68,7 @@
#include "display/intel_bios.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
+#include "display/intel_dmc.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
@@ -78,6 +79,7 @@
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
@@ -328,23 +330,6 @@ struct drm_i915_display_funcs {
void (*read_luts)(struct intel_crtc_state *crtc_state);
};
-struct intel_csr {
- struct work_struct work;
- const char *fw_path;
- u32 required_version;
- u32 max_fw_size; /* bytes */
- u32 *dmc_payload;
- u32 dmc_fw_size; /* dwords */
- u32 version;
- u32 mmio_count;
- i915_reg_t mmioaddr[20];
- u32 mmiodata[20];
- u32 dc_state;
- u32 target_dc_state;
- u32 allowed_dc_mask;
- intel_wakeref_t wakeref;
-};
-
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
@@ -514,6 +499,13 @@ struct intel_l3_parity {
};
struct i915_gem_mm {
+ /*
+ * Shortcut for the stolen region. This points to either
+ * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
+ * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
+ * support stolen.
+ */
+ struct intel_memory_region *stolen_region;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Protects the usage of the GTT stolen memory allocator. This is
@@ -582,6 +574,8 @@ i915_fence_timeout(const struct drm_i915_private *i915)
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
+#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
+
struct ddi_vbt_port_info {
/* Non-NULL if port present. */
struct intel_bios_encoder_data *devdata;
@@ -769,6 +763,7 @@ struct intel_cdclk_config {
struct i915_selftest_stash {
atomic_t counter;
+ struct ida mock_region_instances;
};
struct drm_i915_private {
@@ -817,7 +812,7 @@ struct drm_i915_private {
struct intel_wopcm wopcm;
- struct intel_csr csr;
+ struct intel_dmc dmc;
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
@@ -1131,6 +1126,9 @@ struct drm_i915_private {
u8 framestart_delay;
+ /* Window2 specifies time required to program DSB (Window2) in number of scan lines */
+ u8 window2_delay;
+
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -1158,6 +1156,9 @@ struct drm_i915_private {
/* Mutex to protect the above hdcp component related values. */
struct mutex hdcp_comp_mutex;
+ /* The TTM device structure. */
+ struct ttm_device bdev;
+
I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
/*
@@ -1234,29 +1235,37 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
-#define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen)
#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
-#define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.version)
-#define IS_DISPLAY_RANGE(i915, from, until) \
- (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
-#define IS_DISPLAY_VER(i915, v) (DISPLAY_VER(i915) == (v))
+/*
+ * Deprecated: this will be replaced by individual IP checks:
+ * GRAPHICS_VER(), MEDIA_VER() and DISPLAY_VER()
+ */
+#define INTEL_GEN(dev_priv) GRAPHICS_VER(dev_priv)
+/*
+ * Deprecated: use IS_GRAPHICS_VER(), IS_MEDIA_VER() and IS_DISPLAY_VER() as
+ * appropriate.
+ */
+#define IS_GEN_RANGE(dev_priv, s, e) IS_GRAPHICS_VER(dev_priv, (s), (e))
+/*
+ * Deprecated: use GRAPHICS_VER(), MEDIA_VER() and DISPLAY_VER() as appropriate.
+ */
+#define IS_GEN(dev_priv, n) (GRAPHICS_VER(dev_priv) == (n))
-#define REVID_FOREVER 0xff
-#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
+#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver)
+#define IS_GRAPHICS_VER(i915, from, until) \
+ (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
-#define INTEL_GEN_MASK(s, e) ( \
- BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
- BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
- GENMASK((e) - 1, (s) - 1))
+#define MEDIA_VER(i915) (INTEL_INFO(i915)->media_ver)
+#define IS_MEDIA_VER(i915, from, until) \
+ (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
-/* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN_RANGE(dev_priv, s, e) \
- (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+#define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver)
+#define IS_DISPLAY_VER(i915, from, until) \
+ (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
-#define IS_GEN(dev_priv, n) \
- (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
- INTEL_INFO(dev_priv)->gen == (n))
+#define REVID_FOREVER 0xff
+#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
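The replacement range checks are inclusive on both bounds and, unlike the old gen_mask/GENMASK() test, no longer require compile-time-constant arguments. For example, IS_GRAPHICS_VER(i915, 8, 11) matches versions 8 through 11 and expands to nothing more than:

    (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11)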
@@ -1384,6 +1393,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
+#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1534,9 +1544,17 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_ALDERLAKE_S(__i915) && \
IS_GT_STEP(__i915, since, until))
-#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
+#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
+ (IS_ALDERLAKE_P(__i915) && \
+ IS_DISPLAY_STEP(__i915, since, until))
+
+#define IS_ADLP_GT_STEP(__i915, since, until) \
+ (IS_ALDERLAKE_P(__i915) && \
+ IS_GT_STEP(__i915, since, until))
+
+#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
+#define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
@@ -1556,12 +1574,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
* All later gens can run the final buffer from the ppgtt
*/
-#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
-#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
+#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
#define HAS_WT(dev_priv) HAS_EDRAM(dev_priv)
#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
@@ -1594,7 +1612,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
- (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
+ (IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -1602,23 +1620,22 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SKL_GT3(dev_priv) || \
IS_SKL_GT4(dev_priv))
-#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
-#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+#define HAS_GMBUS_IRQ(dev_priv) (GRAPHICS_VER(dev_priv) >= 4)
+#define HAS_GMBUS_BURST_READ(dev_priv) (GRAPHICS_VER(dev_priv) >= 10 || \
IS_GEMINILAKE(dev_priv) || \
IS_KABYLAKE(dev_priv))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
- !(IS_I915G(dev_priv) || \
- IS_I915GM(dev_priv)))
+#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
+ !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
-#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
+#define HAS_FW_BLC(dev_priv) (GRAPHICS_VER(dev_priv) > 2)
#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc)
-#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
+#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && GRAPHICS_VER(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
@@ -1629,7 +1646,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
-#define HAS_PSR2_SEL_FETCH(dev_priv) (INTEL_GEN(dev_priv) >= 12)
+#define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
@@ -1638,9 +1655,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
-#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
+#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc)
-#define HAS_MSO(i915) (INTEL_GEN(i915) >= 12)
+#define HAS_MSO(i915) (GRAPHICS_VER(i915) >= 12)
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
@@ -1659,7 +1676,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
-#define HAS_LSPCON(dev_priv) (IS_GEN_RANGE(dev_priv, 9, 10))
+#define HAS_LSPCON(dev_priv) (IS_GRAPHICS_VER(dev_priv, 9, 10))
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
@@ -1673,7 +1690,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
-#define HAS_VRR(i915) (INTEL_GEN(i915) >= 12)
+#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12)
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
@@ -1700,13 +1717,19 @@ static inline bool intel_vtd_active(void)
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
- return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
+ return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active();
}
static inline bool
-intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
- return IS_BROXTON(dev_priv) && intel_vtd_active();
+ return IS_BROXTON(i915) && intel_vtd_active();
+}
+
+static inline bool
+intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
/* i915_drv.c */
@@ -1728,7 +1751,8 @@ void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
+ u16 type, u16 instance);
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
@@ -1786,6 +1810,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
+#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -1905,22 +1930,31 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase);
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 10)
+ if (GRAPHICS_VER(i915) >= 10)
return CNL_HWS_CSB_WRITE_INDEX;
else
return I915_HWS_CSB_WRITE_INDEX;
}
static inline enum i915_map_type
-i915_coherent_map_type(struct drm_i915_private *i915)
+i915_coherent_map_type(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj, bool always_coherent)
{
- return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+ if (i915_gem_object_is_lmem(obj))
+ return I915_MAP_WC;
+ if (HAS_LLC(i915) || always_coherent)
+ return I915_MAP_WB;
+ else
+ return I915_MAP_WC;
}
#endif
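The reworked i915_coherent_map_type() now takes the object into account: local memory must always be mapped write-combined, while system memory may use write-back when the platform has an LLC or the caller requires coherency. A hedged caller sketch (the object and flag value are illustrative; i915_gem_object_pin_map() is the usual consumer):

    enum i915_map_type type;
    void *vaddr;

    type = i915_coherent_map_type(i915, obj, false /* always_coherent */);
    vaddr = i915_gem_object_pin_map(obj, type);
    if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);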
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b23f58e94cfb..589388dec48a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -157,8 +157,18 @@ try_again:
if (vma) {
ret = -EBUSY;
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma))
- ret = i915_vma_unbind(vma);
+ !i915_vma_is_active(vma)) {
+ if (flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK) {
+ if (mutex_trylock(&vma->vm->mutex)) {
+ ret = __i915_vma_unbind(vma);
+ mutex_unlock(&vma->vm->mutex);
+ } else {
+ ret = -EBUSY;
+ }
+ } else {
+ ret = i915_vma_unbind(vma);
+ }
+ }
__i915_vma_put(vma);
}
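The new I915_GEM_OBJECT_UNBIND_VM_TRYLOCK flag exists for callers that must not sleep on vm->mutex, for instance reclaim-adjacent paths where waiting could deadlock; they opportunistically take the lock and otherwise report -EBUSY so the caller can retry. The generic shape, with do_locked_work() as an illustrative stand-in:

    /* Sketch: never block on a lock a reclaim-context caller may hold. */
    if (!mutex_trylock(&vma->vm->mutex))
            return -EBUSY;          /* contended: report busy, do not wait */

    ret = do_locked_work(vma);
    mutex_unlock(&vma->vm->mutex);
    return ret;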
@@ -432,7 +442,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
/* PREAD is disallowed for all platforms after TGL-LP. This also
* covers all platforms with local memory.
*/
- if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
+ if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
return -EOPNOTSUPP;
if (args->size == 0)
@@ -712,7 +722,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
/* PWRITE is disallowed for all platforms after TGL-LP. This also
* covers all platforms with local memory.
*/
- if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
+ if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
return -EOPNOTSUPP;
if (args->size == 0)
@@ -999,12 +1009,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->mm.madv = args->madv;
if (i915_gem_object_has_pages(obj)) {
- struct list_head *list;
+ unsigned long flags;
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (!list_empty(&obj->mm.link)) {
+ struct list_head *list;
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
@@ -1012,8 +1021,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
list = &i915->mm.shrink_list;
list_move_tail(&obj->mm.link, list);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
/* if the object is no longer attached, discard its backing storage */
@@ -1099,6 +1108,7 @@ err_unlock:
}
i915_gem_drain_freed_objects(dev_priv);
+
return ret;
}
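In the madvise hunk the list_empty() check moves under mm.obj_lock so that the test and the list_move_tail() are atomic with respect to the shrinker, which may be emptying obj->mm.link concurrently. Sketched with an illustrative willneed flag:

    unsigned long flags;

    spin_lock_irqsave(&i915->mm.obj_lock, flags);
    if (!list_empty(&obj->mm.link)) {
            struct list_head *list = willneed ? &i915->mm.shrink_list
                                              : &i915->mm.purge_list;

            list_move_tail(&obj->mm.link, list);
    }
    spin_unlock_irqrestore(&i915->mm.obj_lock, flags);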
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 3aa213684293..77f1911c463b 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -87,7 +87,6 @@ static void __i915_globals_cleanup(void)
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
- i915_global_buddy_init,
i915_global_context_init,
i915_global_gem_context_init,
i915_global_objects_init,
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index b2f5cd9b9b1a..2d199f411a4a 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -27,7 +27,6 @@ void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
-int i915_global_buddy_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
int i915_global_objects_init(void);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index bb181fe5d47e..35c97c39f125 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -36,8 +36,7 @@
#include <drm/drm_print.h>
-#include "display/intel_atomic.h"
-#include "display/intel_csr.h"
+#include "display/intel_dmc.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -436,13 +435,13 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
- if (ee->engine->class != RENDER_CLASS || INTEL_GEN(m->i915) <= 3)
+ if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
ee->instdone.slice_common);
- if (INTEL_GEN(m->i915) <= 6)
+ if (GRAPHICS_VER(m->i915) <= 6)
return;
for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
@@ -455,7 +454,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
slice, subslice,
ee->instdone.row[slice][subslice]);
- if (INTEL_GEN(m->i915) < 12)
+ if (GRAPHICS_VER(m->i915) < 12)
return;
err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
@@ -544,7 +543,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
upper_32_bits(start), lower_32_bits(start),
upper_32_bits(end), lower_32_bits(end));
}
- if (INTEL_GEN(m->i915) >= 4) {
+ if (GRAPHICS_VER(m->i915) >= 4) {
err_printf(m, " BBADDR: 0x%08x_%08x\n",
(u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
@@ -553,14 +552,14 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
lower_32_bits(ee->faddr));
- if (INTEL_GEN(m->i915) >= 6) {
+ if (GRAPHICS_VER(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
}
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
- if (INTEL_GEN(m->i915) >= 8) {
+ if (GRAPHICS_VER(m->i915) >= 8) {
int i;
for (i = 0; i < 4; i++)
err_printf(m, " PDP%d: 0x%016llx\n",
@@ -707,25 +706,25 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
for (i = 0; i < gt->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
- if (IS_GEN_RANGE(m->i915, 6, 11)) {
+ if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
err_printf(m, "ERROR: 0x%08x\n", gt->error);
err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
}
- if (INTEL_GEN(m->i915) >= 8)
+ if (GRAPHICS_VER(m->i915) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
gt->fault_data1, gt->fault_data0);
- if (IS_GEN(m->i915, 7))
+ if (GRAPHICS_VER(m->i915) == 7)
err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
- if (IS_GEN_RANGE(m->i915, 8, 11))
+ if (IS_GRAPHICS_VER(m->i915, 8, 11))
err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
- if (IS_GEN(m->i915, 12))
+ if (GRAPHICS_VER(m->i915) == 12)
err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
- if (INTEL_GEN(m->i915) >= 12) {
+ if (GRAPHICS_VER(m->i915) >= 12) {
int i;
for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
@@ -789,14 +788,14 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(m->i915)) {
- struct intel_csr *csr = &m->i915->csr;
+ if (HAS_DMC(m->i915)) {
+ struct intel_dmc *dmc = &m->i915->dmc;
err_printf(m, "DMC loaded: %s\n",
- yesno(csr->dmc_payload != NULL));
+ yesno(intel_dmc_has_payload(m->i915) != 0));
err_printf(m, "DMC fw version: %d.%d\n",
- CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
+ DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
}
err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
@@ -808,9 +807,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
- if (error->display)
- intel_display_print_error_state(m, error->display);
-
err_print_capabilities(m, error);
err_print_params(m, &error->params);
}
@@ -974,7 +970,6 @@ void __i915_gpu_coredump_free(struct kref *error_ref)
}
kfree(error->overlay);
- kfree(error->display);
cleanup_params(error);
@@ -1097,12 +1092,12 @@ static void gt_record_fences(struct intel_gt_coredump *gt)
struct intel_uncore *uncore = gt->_gt->uncore;
int i;
- if (INTEL_GEN(uncore->i915) >= 6) {
+ if (GRAPHICS_VER(uncore->i915) >= 6) {
for (i = 0; i < ggtt->num_fences; i++)
gt->fence[i] =
intel_uncore_read64(uncore,
FENCE_REG_GEN6_LO(i));
- } else if (INTEL_GEN(uncore->i915) >= 4) {
+ } else if (GRAPHICS_VER(uncore->i915) >= 4) {
for (i = 0; i < ggtt->num_fences; i++)
gt->fence[i] =
intel_uncore_read64(uncore,
@@ -1120,20 +1115,20 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
const struct intel_engine_cs *engine = ee->engine;
struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(i915) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
- if (INTEL_GEN(i915) >= 12)
+ if (GRAPHICS_VER(i915) >= 12)
ee->fault_reg = intel_uncore_read(engine->uncore,
GEN12_RING_FAULT_REG);
- else if (INTEL_GEN(i915) >= 8)
+ else if (GRAPHICS_VER(i915) >= 8)
ee->fault_reg = intel_uncore_read(engine->uncore,
GEN8_RING_FAULT_REG);
else
ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
}
- if (INTEL_GEN(i915) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
ee->esr = ENGINE_READ(engine, RING_ESR);
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
@@ -1141,7 +1136,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
ee->instps = ENGINE_READ(engine, RING_INSTPS);
ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
ee->ccid = ENGINE_READ(engine, CCID);
- if (INTEL_GEN(i915) >= 8) {
+ if (GRAPHICS_VER(i915) >= 8) {
ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
@@ -1160,13 +1155,13 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
ee->head = ENGINE_READ(engine, RING_HEAD);
ee->tail = ENGINE_READ(engine, RING_TAIL);
ee->ctl = ENGINE_READ(engine, RING_CTL);
- if (INTEL_GEN(i915) > 2)
+ if (GRAPHICS_VER(i915) > 2)
ee->mode = ENGINE_READ(engine, RING_MI_MODE);
if (!HWS_NEEDS_PHYSICAL(i915)) {
i915_reg_t mmio;
- if (IS_GEN(i915, 7)) {
+ if (GRAPHICS_VER(i915) == 7) {
switch (engine->id) {
default:
MISSING_CASE(engine->id);
@@ -1184,7 +1179,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN(engine->i915, 6)) {
+ } else if (GRAPHICS_VER(engine->i915) == 6) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -1201,13 +1196,13 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
- if (IS_GEN(i915, 6)) {
+ if (GRAPHICS_VER(i915) == 6) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
- } else if (IS_GEN(i915, 7)) {
+ } else if (GRAPHICS_VER(i915) == 7) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE);
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
u32 base = engine->mmio_base;
for (i = 0; i < 4; i++) {
@@ -1539,52 +1534,52 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
}
- if (IS_GEN(i915, 7))
+ if (GRAPHICS_VER(i915) == 7)
gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (INTEL_GEN(i915) >= 12) {
+ if (GRAPHICS_VER(i915) >= 12) {
gt->fault_data0 = intel_uncore_read(uncore,
GEN12_FAULT_TLB_DATA0);
gt->fault_data1 = intel_uncore_read(uncore,
GEN12_FAULT_TLB_DATA1);
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
gt->fault_data0 = intel_uncore_read(uncore,
GEN8_FAULT_TLB_DATA0);
gt->fault_data1 = intel_uncore_read(uncore,
GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN(i915, 6)) {
+ if (GRAPHICS_VER(i915) == 6) {
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_GEN(i915) >= 7)
+ if (GRAPHICS_VER(i915) >= 7)
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
- if (INTEL_GEN(i915) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
gt->derrmr = intel_uncore_read(uncore, DERRMR);
- if (INTEL_GEN(i915) < 12) {
+ if (GRAPHICS_VER(i915) < 12) {
gt->error = intel_uncore_read(uncore, ERROR_GEN6);
gt->done_reg = intel_uncore_read(uncore, DONE_REG);
}
}
/* 3: Feature specific registers */
- if (IS_GEN_RANGE(i915, 6, 7)) {
+ if (IS_GRAPHICS_VER(i915, 6, 7)) {
gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
- if (IS_GEN_RANGE(i915, 8, 11))
+ if (IS_GRAPHICS_VER(i915, 8, 11))
gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
- if (IS_GEN(i915, 12))
+ if (GRAPHICS_VER(i915) == 12)
gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
- if (INTEL_GEN(i915) >= 12) {
+ if (GRAPHICS_VER(i915) >= 12) {
for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
gt->sfc_done[i] =
intel_uncore_read(uncore, GEN12_SFC_DONE(i));
@@ -1594,7 +1589,7 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
}
/* 4: Everything else */
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
gt->gtier[0] =
intel_uncore_read(uncore,
@@ -1613,7 +1608,7 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
intel_uncore_read(uncore,
GEN11_GUNIT_CSME_INTR_ENABLE);
gt->ngtier = 6;
- } else if (INTEL_GEN(i915) >= 8) {
+ } else if (GRAPHICS_VER(i915) >= 8) {
gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
gt->gtier[i] =
@@ -1623,7 +1618,7 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->ier = intel_uncore_read(uncore, DEIER);
gt->gtier[0] = intel_uncore_read(uncore, GTIER);
gt->ngtier = 1;
- } else if (IS_GEN(i915, 2)) {
+ } else if (GRAPHICS_VER(i915) == 2) {
gt->ier = intel_uncore_read16(uncore, GEN2_IER);
} else if (!IS_VALLEYVIEW(i915)) {
gt->ier = intel_uncore_read(uncore, GEN2_IER);
@@ -1679,7 +1674,7 @@ static const char *error_msg(struct i915_gpu_coredump *error)
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%x:%08x",
- INTEL_GEN(error->i915), hung_classes,
+ GRAPHICS_VER(error->i915), hung_classes,
generate_ecode(first));
if (first && first->context.pid) {
/* Just show the first executing process, more is confusing */
@@ -1826,7 +1821,6 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
}
error->overlay = intel_overlay_capture_error_state(i915);
- error->display = intel_display_capture_error_state(i915);
return error;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 16bc42de4b84..b98d8cdbe4f2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: MIT
*
- * Copyright � 2008-2018 Intel Corporation
+ * Copyright © 2008-2018 Intel Corporation
*/
#ifndef _I915_GPU_ERROR_H_
@@ -29,7 +29,6 @@ struct drm_i915_private;
struct i915_vma_compress;
struct intel_engine_capture_vma;
struct intel_overlay_error_state;
-struct intel_display_error_state;
struct i915_vma_coredump {
struct i915_vma_coredump *next;
@@ -182,7 +181,6 @@ struct i915_gpu_coredump {
struct i915_params params;
struct intel_overlay_error_state *overlay;
- struct intel_display_error_state *display;
struct scatterlist *sgl, *fit;
};
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7eefbdec25a2..a11bdb667241 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -35,6 +35,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
+#include "display/intel_de.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
@@ -194,7 +195,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 11)
hpd->hpd = hpd_gen11;
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
hpd->hpd = hpd_bxt;
else if (DISPLAY_VER(dev_priv) >= 8)
hpd->hpd = hpd_bdw;
@@ -806,7 +807,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -857,7 +858,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
- IS_G4X(dev_priv) || IS_DISPLAY_VER(dev_priv, 2) ||
+ IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
@@ -2077,7 +2078,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
- if (IS_DISPLAY_VER(dev_priv, 5) && de_iir & DE_PCU_EVENT)
+ if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
gen5_rps_irq_handler(&dev_priv->gt.rps);
}
@@ -2174,7 +2175,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
gt_iir = raw_reg_read(regs, GTIIR);
if (gt_iir) {
raw_reg_write(regs, GTIIR, gt_iir);
- if (INTEL_GEN(i915) >= 6)
+ if (GRAPHICS_VER(i915) >= 6)
gen6_gt_irq_handler(&i915->gt, gt_iir);
else
gen5_gt_irq_handler(&i915->gt, gt_iir);
@@ -2191,7 +2192,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
}
- if (INTEL_GEN(i915) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
if (pm_iir) {
raw_reg_write(regs, GEN6_PMIIR, pm_iir);
@@ -2269,7 +2270,17 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
u32 mask;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 13)
+ return TGL_DE_PORT_AUX_DDIA |
+ TGL_DE_PORT_AUX_DDIB |
+ TGL_DE_PORT_AUX_DDIC |
+ XELPD_DE_PORT_AUX_DDID |
+ XELPD_DE_PORT_AUX_DDIE |
+ TGL_DE_PORT_AUX_USBC1 |
+ TGL_DE_PORT_AUX_USBC2 |
+ TGL_DE_PORT_AUX_USBC3 |
+ TGL_DE_PORT_AUX_USBC4;
+ else if (DISPLAY_VER(dev_priv) >= 12)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -2287,10 +2298,10 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv) || IS_DISPLAY_VER(dev_priv, 11))
+ if (IS_CNL_WITH_PORT_F(dev_priv) || DISPLAY_VER(dev_priv) == 11)
mask |= CNL_AUX_CHANNEL_F;
- if (IS_DISPLAY_VER(dev_priv, 11))
+ if (DISPLAY_VER(dev_priv) == 11)
mask |= ICL_AUX_CHANNEL_E;
return mask;
@@ -2298,7 +2309,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
else if (DISPLAY_VER(dev_priv) >= 11)
return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2414,6 +2425,17 @@ static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
+u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ mask |= XELPD_PIPE_SOFT_UNDERRUN |
+ XELPD_PIPE_HARD_UNDERRUN;
+
+ return mask;
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2421,6 +2443,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
u32 iir;
enum pipe pipe;
+ drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
+
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
if (iir) {
@@ -2458,7 +2482,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
found = true;
}
- if (IS_GEN9_LP(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
@@ -2474,7 +2498,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
}
- if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev_priv);
found = true;
}
@@ -2522,7 +2547,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
- if (iir & GEN8_PIPE_FIFO_UNDERRUN)
+ if (iir & gen8_de_pipe_underrun_mask(dev_priv))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
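Because gen8_de_pipe_underrun_mask() (added above) is used both when programming the IER/IMR enable masks and when decoding IIR, the XELPD soft/hard underrun bits cannot drift out of sync between the two sides. The decode side stays a plain mask test:

    if (iir & gen8_de_pipe_underrun_mask(dev_priv))
            intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);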
@@ -3014,7 +3039,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, DE);
dev_priv->irq_mask = ~0u;
- if (IS_GEN(dev_priv, 7))
+ if (GRAPHICS_VER(dev_priv) == 7)
intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
if (IS_HASWELL(dev_priv)) {
@@ -3058,14 +3083,13 @@ static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
}
}
-static void gen8_irq_reset(struct drm_i915_private *dev_priv)
+static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
- gen8_master_intr_disable(dev_priv->uncore.regs);
-
- gen8_gt_irq_reset(&dev_priv->gt);
+ if (!HAS_DISPLAY(dev_priv))
+ return;
intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
@@ -3077,6 +3101,16 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+}
+
+static void gen8_irq_reset(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ gen8_master_intr_disable(dev_priv->uncore.regs);
+
+ gen8_gt_irq_reset(&dev_priv->gt);
+ gen8_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
@@ -3092,6 +3126,9 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -3147,7 +3184,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ u32 extra_ier = GEN8_PIPE_VBLANK |
+ gen8_de_pipe_underrun_mask(dev_priv) |
gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
@@ -3620,7 +3658,7 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (GRAPHICS_VER(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -3714,10 +3752,13 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
if (DISPLAY_VER(dev_priv) <= 10)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
if (DISPLAY_VER(dev_priv) >= 11) {
@@ -3728,11 +3769,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
de_pipe_enables = de_pipe_masked |
- GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ GEN8_PIPE_VBLANK |
+ gen8_de_pipe_underrun_mask(dev_priv) |
gen8_de_pipe_flip_done_mask(dev_priv);
de_port_enables = de_port_masked;
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
@@ -3797,6 +3839,16 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_master_intr_enable(dev_priv->uncore.regs);
}
+static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ gen8_de_irq_postinstall(dev_priv);
+
+ intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
+ GEN11_DISPLAY_IRQ_ENABLE);
+}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
@@ -3807,12 +3859,10 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
icp_irq_postinstall(dev_priv);
gen11_gt_irq_postinstall(&dev_priv->gt);
- gen8_de_irq_postinstall(dev_priv);
+ gen11_de_irq_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
- intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
-
if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
dg1_master_intr_enable(uncore->regs);
intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
@@ -3987,7 +4037,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+ intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4095,7 +4145,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+ intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4240,10 +4290,12 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+ intel_engine_cs_irq(dev_priv->gt.engine[RCS0],
+ iir);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
+ intel_engine_cs_irq(dev_priv->gt.engine[VCS0],
+ iir >> 25);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4278,7 +4330,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->l3_parity.remap_info[i] = NULL;
/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
- if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
+ if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
if (!HAS_DISPLAY(dev_priv))
@@ -4317,7 +4369,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
else if (DISPLAY_VER(dev_priv) >= 11)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup;
@@ -4349,18 +4401,18 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
return cherryview_irq_handler;
else if (IS_VALLEYVIEW(dev_priv))
return valleyview_irq_handler;
- else if (IS_GEN(dev_priv, 4))
+ else if (GRAPHICS_VER(dev_priv) == 4)
return i965_irq_handler;
- else if (IS_GEN(dev_priv, 3))
+ else if (GRAPHICS_VER(dev_priv) == 3)
return i915_irq_handler;
else
return i8xx_irq_handler;
} else {
if (HAS_MASTER_UNIT_IRQ(dev_priv))
return dg1_irq_handler;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (GRAPHICS_VER(dev_priv) >= 11)
return gen11_irq_handler;
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (GRAPHICS_VER(dev_priv) >= 8)
return gen8_irq_handler;
else
return ilk_irq_handler;
@@ -4374,16 +4426,16 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv)
cherryview_irq_reset(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_irq_reset(dev_priv);
- else if (IS_GEN(dev_priv, 4))
+ else if (GRAPHICS_VER(dev_priv) == 4)
i965_irq_reset(dev_priv);
- else if (IS_GEN(dev_priv, 3))
+ else if (GRAPHICS_VER(dev_priv) == 3)
i915_irq_reset(dev_priv);
else
i8xx_irq_reset(dev_priv);
} else {
- if (INTEL_GEN(dev_priv) >= 11)
+ if (GRAPHICS_VER(dev_priv) >= 11)
gen11_irq_reset(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (GRAPHICS_VER(dev_priv) >= 8)
gen8_irq_reset(dev_priv);
else
ilk_irq_reset(dev_priv);
@@ -4397,16 +4449,16 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
cherryview_irq_postinstall(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_irq_postinstall(dev_priv);
- else if (IS_GEN(dev_priv, 4))
+ else if (GRAPHICS_VER(dev_priv) == 4)
i965_irq_postinstall(dev_priv);
- else if (IS_GEN(dev_priv, 3))
+ else if (GRAPHICS_VER(dev_priv) == 3)
i915_irq_postinstall(dev_priv);
else
i8xx_irq_postinstall(dev_priv);
} else {
- if (INTEL_GEN(dev_priv) >= 11)
+ if (GRAPHICS_VER(dev_priv) >= 11)
gen11_irq_postinstall(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (GRAPHICS_VER(dev_priv) >= 8)
gen8_irq_postinstall(dev_priv);
else
ilk_irq_postinstall(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 25f25cd95818..db34d5dbe402 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -100,6 +100,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
+u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv);
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
ktime_t *vblank_time, bool in_vblank_irq);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 4c8cd08c672d..666808cb3a32 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -28,10 +28,90 @@
#include "i915_drv.h"
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+struct remap_pfn {
+ struct mm_struct *mm;
+ unsigned long pfn;
+ pgprot_t prot;
+
+ struct sgt_iter sgt;
+ resource_size_t iobase;
+};
+
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ /* Special PTE are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+ r->pfn++;
+
+ return 0;
+}
#define use_dma(io) ((io) != -1)
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+ if (use_dma(r->iobase))
+ return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+ else
+ return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ if (GEM_WARN_ON(!r->sgt.sgp))
+ return -EINVAL;
+
+ /* Special PTE are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte,
+ pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+ r->pfn++; /* track insertions in case we need to unwind later */
+
+ r->sgt.curr += PAGE_SIZE;
+ if (r->sgt.curr >= r->sgt.max)
+ r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+ return 0;
+}
+
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap)
+{
+ struct remap_pfn r;
+ int err;
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+ /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ r.mm = vma->vm_mm;
+ r.pfn = pfn;
+ r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+ (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+ err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
+}
+
/**
* remap_io_sg - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -46,7 +126,12 @@ int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase)
{
- unsigned long pfn, len, remapped = 0;
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .prot = vma->vm_page_prot,
+ .sgt = __sgt_iter(sgl, use_dma(iobase)),
+ .iobase = iobase,
+ };
int err;
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -55,25 +140,11 @@ int remap_io_sg(struct vm_area_struct *vma,
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
- do {
- if (use_dma(iobase)) {
- if (!sg_dma_len(sgl))
- break;
- pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
- len = sg_dma_len(sgl);
- } else {
- pfn = page_to_pfn(sg_page(sgl));
- len = sgl->length;
- }
-
- err = remap_pfn_range(vma, addr + remapped, pfn, len,
- vma->vm_page_prot);
- if (err)
- break;
- remapped += len;
- } while ((sgl = __sg_next(sgl)));
-
- if (err)
- zap_vma_ptes(vma, addr, remapped);
- return err;
+ err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
}
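
Both remap paths now share one shape: apply_to_page_range() allocates and walks the page tables covering [addr, addr + size) and calls the supplied pte_fn_t once per PTE, and on failure the caller zaps exactly as many PTEs as were inserted. A hedged sketch of that contract; fill_pte(), struct fill_ctx and first_pfn are hypothetical names for illustration:

	struct fill_ctx {
		struct mm_struct *mm;
		unsigned long pfn;	/* advanced once per inserted PTE */
		pgprot_t prot;
	};

	static int fill_pte(pte_t *pte, unsigned long addr, void *data)
	{
		struct fill_ctx *c = data;

		set_pte_at(c->mm, addr, pte,
			   pte_mkspecial(pfn_pte(c->pfn, c->prot)));
		c->pfn++;
		return 0;	/* any non-zero value aborts the walk */
	}

	...
	struct fill_ctx ctx = {
		.mm = vma->vm_mm,
		.pfn = first_pfn,
		.prot = vma->vm_page_prot,
	};

	err = apply_to_page_range(ctx.mm, addr, size, fill_pte, &ctx);
	if (unlikely(err))	/* unwind only the PTEs actually written */
		zap_vma_ptes(vma, addr, (ctx.pfn - first_pfn) << PAGE_SHIFT);
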
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 0320878d96b0..e07f4cfea63a 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -160,7 +160,7 @@ i915_param_named_unsafe(edp_vswing, int, 0400,
i915_param_named_unsafe(enable_guc, int, 0400,
"Enable GuC load for GuC submission and/or HuC load. "
"Required functionality can be selected using bitmask values. "
- "(-1=auto, 0=disable [default], 1=GuC submission, 2=HuC load)");
+ "(-1=auto [default], 0=disable, 1=GuC submission, 2=HuC load)");
i915_param_named(guc_log_level, int, 0400,
"GuC firmware logging level. Requires GuC to be loaded. "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 34ebb0662547..f27eceb82c0f 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -59,7 +59,7 @@ struct drm_printer;
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
- param(int, enable_guc, 0, 0400) \
+ param(int, enable_guc, -1, 0400) \
param(int, guc_log_level, -1, 0400) \
param(char *, guc_firmware_path, NULL, 0400) \
param(char *, huc_firmware_path, NULL, 0400) \
@@ -71,18 +71,18 @@ struct drm_printer;
param(int, fastboot, -1, 0600) \
param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
- param(unsigned long, fake_lmem_start, 0, 0400) \
- param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, 0600) \
+ param(unsigned long, fake_lmem_start, 0, IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM) ? 0400 : 0) \
+ param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
param(bool, load_detect_test, false, 0600) \
param(bool, force_reset_modeset_test, false, 0600) \
- param(bool, error_capture, true, 0600) \
+ param(bool, error_capture, true, IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) ? 0600 : 0) \
param(bool, disable_display, false, 0400) \
param(bool, verbose_state_checks, true, 0) \
param(bool, nuclear_pageflip, false, 0400) \
param(bool, enable_dp_mst, true, 0600) \
- param(bool, enable_gvt, false, 0400)
+ param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0)
#define MEMBER(T, member, ...) T member;
struct i915_params {
@@ -97,4 +97,3 @@ void i915_params_copy(struct i915_params *dest, const struct i915_params *src);
void i915_params_free(struct i915_params *params);
#endif
-
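
The permission changes above exploit a property of module_param_named(): a perm of 0 registers the parameter (still settable on the kernel command line) but creates no node under /sys/module/i915/parameters/, so knobs whose backing Kconfig option is compiled out simply disappear from sysfs. A hedged sketch of the pattern; example_knob is a hypothetical parameter:

	#include <linux/module.h>

	static bool example_knob;
	/* visible read-only in sysfs only when GVT support is built in */
	module_param_named(example_knob, example_knob, bool,
			   IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0);
	MODULE_PARM_DESC(example_knob, "illustrative GVT-gated knob");
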
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 480553746794..83b500bb170c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -36,7 +36,10 @@
#include "i915_selftest.h"
#define PLATFORM(x) .platform = (x)
-#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1), .display.version = (x)
+#define GEN(x) \
+ .graphics_ver = (x), \
+ .media_ver = (x), \
+ .display.ver = (x)
#define I845_PIPE_OFFSETS \
.pipe_offsets = { \
@@ -640,12 +643,12 @@ static const struct intel_device_info chv_info = {
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .display.has_csr = 1, \
+ .display.has_dmc = 1, \
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
.display.has_ipc = 1, \
- .ddb_size = 896, \
- .num_supported_dbuf_slices = 1
+ .dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
+ .dbuf.slice_mask = BIT(DBUF_S1)
#define SKL_PLATFORM \
GEN9_FEATURES, \
@@ -680,7 +683,7 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .num_supported_dbuf_slices = 1, \
+ .dbuf.slice_mask = BIT(DBUF_S1), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
@@ -695,7 +698,7 @@ static const struct intel_device_info skl_gt4_info = {
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
.has_runtime_pm = 1, \
- .display.has_csr = 1, \
+ .display.has_dmc = 1, \
.has_rc6 = 1, \
.has_rps = true, \
.display.has_dp_mst = 1, \
@@ -717,14 +720,14 @@ static const struct intel_device_info skl_gt4_info = {
static const struct intel_device_info bxt_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_BROXTON),
- .ddb_size = 512,
+ .dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */
};
static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
- .display.version = 10,
- .ddb_size = 1024,
+ .display.ver = 10,
+ .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
GLK_COLORS,
};
@@ -787,7 +790,7 @@ static const struct intel_device_info cml_gt2_info = {
#define GEN10_FEATURES \
GEN9_FEATURES, \
GEN(10), \
- .ddb_size = 1024, \
+ .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ \
.display.has_dsc = 1, \
.has_coherent_ggtt = false, \
GLK_COLORS
@@ -827,8 +830,8 @@ static const struct intel_device_info cnl_info = {
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
GEN(11), \
- .ddb_size = 2048, \
- .num_supported_dbuf_slices = 2, \
+ .dbuf.size = 2048, \
+ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
@@ -904,16 +907,16 @@ static const struct intel_device_info rkl_info = {
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
};
-#define GEN12_DGFX_FEATURES \
- GEN12_FEATURES, \
- .memory_regions = REGION_SMEM | REGION_LMEM, \
+#define DGFX_FEATURES \
+ .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
.has_master_unit_irq = 1, \
.has_llc = 0, \
.has_snoop = 1, \
.is_dgfx = 1
static const struct intel_device_info dg1_info __maybe_unused = {
- GEN12_DGFX_FEATURES,
+ GEN12_FEATURES,
+ DGFX_FEATURES,
PLATFORM(INTEL_DG1),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
@@ -936,6 +939,29 @@ static const struct intel_device_info adl_s_info = {
.dma_mask_size = 46,
};
+#define XE_LPD_FEATURES \
+ .display.ver = 13, \
+ .display.has_psr_hw_tracking = 0, \
+ .abox_mask = GENMASK(1, 0), \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \
+ .dbuf.size = 4096, \
+ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4)
+
+static const struct intel_device_info adl_p_info = {
+ GEN12_FEATURES,
+ XE_LPD_FEATURES,
+ PLATFORM(INTEL_ALDERLAKE_P),
+ .has_cdclk_crawl = 1,
+ .require_force_probe = 1,
+ .display.has_modular_fia = 1,
+ .platform_engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ .ppgtt_size = 48,
+ .dma_mask_size = 39,
+};
+
#undef GEN
#undef PLATFORM
@@ -1013,6 +1039,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_TGL_12_IDS(&tgl_info),
INTEL_RKL_IDS(&rkl_info),
INTEL_ADLS_IDS(&adl_s_info),
+ INTEL_ADLP_IDS(&adl_p_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
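
The reworked GEN() macro seeds three independent IP versions from one number, which individual platforms can then override; glk_info above keeps graphics_ver and media_ver at 9 from GEN(9) while bumping only the display version. Expanded, GEN(12) in a device-info initializer is simply:

	/* GEN(12) now expands to: */
	.graphics_ver = 12,
	.media_ver = 12,
	.display.ver = 12,
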
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 85ad62dbabfa..9f94914958c3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -719,7 +719,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* it to userspace...
*/
reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
- (IS_GEN(stream->perf->i915, 12) ?
+ (GRAPHICS_VER(stream->perf->i915) == 12 ?
OAREPORT_REASON_MASK_EXTENDED :
OAREPORT_REASON_MASK));
@@ -734,7 +734,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* understand that the ID has been squashed by the kernel.
*/
if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
- INTEL_GEN(stream->perf->i915) <= 11)
+ GRAPHICS_VER(stream->perf->i915) <= 11)
ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -801,7 +801,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (start_offset != *offset) {
i915_reg_t oaheadptr;
- oaheadptr = IS_GEN(stream->perf->i915, 12) ?
+ oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -854,7 +854,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
- oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
+ oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
GEN12_OAG_OASTATUS : GEN8_OASTATUS;
oastatus = intel_uncore_read(uncore, oastatus_reg);
@@ -901,7 +901,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, oastatus_reg,
GEN8_OASTATUS_COUNTER_OVERFLOW |
GEN8_OASTATUS_REPORT_LOST,
- IS_GEN_RANGE(uncore->i915, 8, 11) ?
+ IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
(GEN8_OASTATUS_HEAD_POINTER_WRAP |
GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}
@@ -1243,7 +1243,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
if (IS_ERR(ce))
return PTR_ERR(ce);
- switch (INTEL_GEN(ce->engine->i915)) {
+ switch (GRAPHICS_VER(ce->engine->i915)) {
case 7: {
/*
* On Haswell we don't do any post processing of the reports
@@ -1257,11 +1257,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 8:
case 9:
case 10:
- if (intel_engine_in_execlists_submission_mode(ce->engine)) {
- stream->specific_ctx_id_mask =
- (1U << GEN8_CTX_ID_WIDTH) - 1;
- stream->specific_ctx_id = stream->specific_ctx_id_mask;
- } else {
+ if (intel_engine_uses_guc(ce->engine)) {
/*
* When using GuC, the context descriptor we write in
* i915 is read by GuC and rewritten before it's
@@ -1280,6 +1276,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+ } else {
+ stream->specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
}
break;
@@ -1297,7 +1297,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
}
default:
- MISSING_CASE(INTEL_GEN(ce->engine->i915));
+ MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
}
ce->tag = stream->specific_ctx_id;
@@ -1602,7 +1602,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
cmd |= MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(stream->perf->i915) >= 8)
+ if (GRAPHICS_VER(stream->perf->i915) >= 8)
cmd++;
for (d = 0; d < dword_count; d++) {
@@ -1731,7 +1731,7 @@ retry:
*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
/* Restart from the beginning if we had timestamps roll over. */
- *cs++ = (INTEL_GEN(i915) < 8 ?
+ *cs++ = (GRAPHICS_VER(i915) < 8 ?
MI_BATCH_BUFFER_START :
MI_BATCH_BUFFER_START_GEN8) |
MI_BATCH_PREDICATE;
@@ -1768,7 +1768,7 @@ retry:
*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
/* Predicate the jump. */
- *cs++ = (INTEL_GEN(i915) < 8 ?
+ *cs++ = (GRAPHICS_VER(i915) < 8 ?
MI_BATCH_BUFFER_START :
MI_BATCH_BUFFER_START_GEN8) |
MI_BATCH_PREDICATE;
@@ -1892,7 +1892,7 @@ retry:
oa_config->flex_regs_len);
/* Jump into the active wait. */
- *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
+ *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
MI_BATCH_BUFFER_START :
MI_BATCH_BUFFER_START_GEN8);
*cs++ = i915_ggtt_offset(stream->noa_wait);
@@ -2492,7 +2492,7 @@ gen8_enable_metric_set(struct i915_perf_stream *stream,
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
+ if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
intel_uncore_write(uncore, GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2797,7 +2797,7 @@ get_default_sseu_config(struct intel_sseu *out_sseu,
*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
- if (IS_GEN(engine->i915, 11)) {
+ if (GRAPHICS_VER(engine->i915) == 11) {
/*
* We only need subslice count so it doesn't matter which ones
* we select - just turn off low bits in the amount of half of
@@ -2864,7 +2864,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
}
if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
- (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
+ (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
DRM_DEBUG("Only OA report sampling supported\n");
return -EINVAL;
}
@@ -3006,7 +3006,7 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
stream = READ_ONCE(engine->i915->perf.exclusive_stream);
- if (stream && INTEL_GEN(stream->perf->i915) < 12)
+ if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
gen8_update_reg_state_unlocked(ce, stream);
}
@@ -3443,7 +3443,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
*/
if (IS_HASWELL(perf->i915) && specific_ctx)
privileged_op = false;
- else if (IS_GEN(perf->i915, 12) && specific_ctx &&
+ else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
(props->sample_flags & SAMPLE_OA_REPORT) == 0)
privileged_op = false;
@@ -4119,7 +4119,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
}
oa_config->b_counter_regs = regs;
- if (INTEL_GEN(perf->i915) < 8) {
+ if (GRAPHICS_VER(perf->i915) < 8) {
if (args->n_flex_regs != 0) {
err = -EINVAL;
goto reg_err;
@@ -4318,6 +4318,7 @@ static void oa_init_supported_formats(struct i915_perf *perf)
case INTEL_ROCKETLAKE:
case INTEL_DG1:
case INTEL_ALDERLAKE_S:
+ case INTEL_ALDERLAKE_P:
oa_format_add(perf, I915_OA_FORMAT_A12);
oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
@@ -4364,7 +4365,7 @@ void i915_perf_init(struct drm_i915_private *i915)
*/
perf->ops.read = gen8_oa_read;
- if (IS_GEN_RANGE(i915, 8, 9)) {
+ if (IS_GRAPHICS_VER(i915, 8, 9)) {
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4383,7 +4384,7 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.disable_metric_set = gen8_disable_metric_set;
perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(i915, 8)) {
+ if (GRAPHICS_VER(i915) == 8) {
perf->ctx_oactxctrl_offset = 0x120;
perf->ctx_flexeu0_offset = 0x2ce;
@@ -4394,7 +4395,7 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->gen8_valid_ctx_bit = BIT(16);
}
- } else if (IS_GEN_RANGE(i915, 10, 11)) {
+ } else if (IS_GRAPHICS_VER(i915, 10, 11)) {
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4408,7 +4409,7 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.disable_metric_set = gen10_disable_metric_set;
perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(i915, 10)) {
+ if (GRAPHICS_VER(i915) == 10) {
perf->ctx_oactxctrl_offset = 0x128;
perf->ctx_flexeu0_offset = 0x3de;
} else {
@@ -4416,7 +4417,7 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ctx_flexeu0_offset = 0x78e;
}
perf->gen8_valid_ctx_bit = BIT(16);
- } else if (IS_GEN(i915, 12)) {
+ } else if (GRAPHICS_VER(i915) == 12) {
perf->ops.is_valid_b_counter_reg =
gen12_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 41651ac255fa..34d37d46a126 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -287,7 +287,7 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915)
* risk a machine hang. For a fun history lesson dig out the old
* userspace intel_gpu_top and run it on Ivybridge or Haswell!
*/
- return IS_GEN(i915, 7);
+ return GRAPHICS_VER(i915) == 7;
}
static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
@@ -463,7 +463,7 @@ engine_event_status(struct intel_engine_cs *engine,
case I915_SAMPLE_WAIT:
break;
case I915_SAMPLE_SEMA:
- if (INTEL_GEN(engine->i915) < 6)
+ if (GRAPHICS_VER(engine->i915) < 6)
return -ENODEV;
break;
default:
@@ -476,6 +476,8 @@ engine_event_status(struct intel_engine_cs *engine,
static int
config_status(struct drm_i915_private *i915, u64 config)
{
+ struct intel_gt *gt = &i915->gt;
+
switch (config) {
case I915_PMU_ACTUAL_FREQUENCY:
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
@@ -483,13 +485,13 @@ config_status(struct drm_i915_private *i915, u64 config)
return -ENODEV;
fallthrough;
case I915_PMU_REQUESTED_FREQUENCY:
- if (INTEL_GEN(i915) < 6)
+ if (GRAPHICS_VER(i915) < 6)
return -ENODEV;
break;
case I915_PMU_INTERRUPTS:
break;
case I915_PMU_RC6_RESIDENCY:
- if (!HAS_RC6(i915))
+ if (!gt->rc6.supported)
return -ENODEV;
break;
case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
@@ -834,15 +836,13 @@ static ssize_t i915_pmu_event_show(struct device *dev,
return sprintf(buf, "config=0x%lx\n", eattr->val);
}
-static ssize_t
-i915_pmu_get_attr_cpumask(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}
-static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);
+static DEVICE_ATTR_RO(cpumask);
static struct attribute *i915_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
@@ -1145,7 +1145,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
int ret = -ENOMEM;
- if (INTEL_GEN(i915) <= 2) {
+ if (GRAPHICS_VER(i915) <= 2) {
drm_info(&i915->drm, "PMU not supported for this GPU.");
return;
}
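
The rename to cpumask_show() is what makes DEVICE_ATTR_RO() usable: the macro stringifies its argument for the attribute name and token-pastes _show onto it for the callback, yielding a 0444 attribute. A sketch of the convention; foo is hypothetical:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 42);
	}
	/* expands to: struct device_attribute dev_attr_foo =
	 *	{ .attr = { .name = "foo", .mode = 0444 }, .show = foo_show }; */
	static DEVICE_ATTR_RO(foo);
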
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index fed337ad7b68..e49da36c62fb 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -419,11 +419,73 @@ static int query_perf_config(struct drm_i915_private *i915,
}
}
+static int query_memregion_info(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct drm_i915_query_memory_regions __user *query_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct drm_i915_memory_region_info __user *info_ptr =
+ &query_ptr->regions[0];
+ struct drm_i915_memory_region_info info = { };
+ struct drm_i915_query_memory_regions query;
+ struct intel_memory_region *mr;
+ u32 total_length;
+ int ret, id, i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
+ return -ENODEV;
+
+ if (query_item->flags != 0)
+ return -EINVAL;
+
+ total_length = sizeof(query);
+ for_each_memory_region(mr, i915, id) {
+ if (mr->private)
+ continue;
+
+ total_length += sizeof(info);
+ }
+
+ ret = copy_query_item(&query, sizeof(query), total_length, query_item);
+ if (ret != 0)
+ return ret;
+
+ if (query.num_regions)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
+ if (query.rsvd[i])
+ return -EINVAL;
+ }
+
+ for_each_memory_region(mr, i915, id) {
+ if (mr->private)
+ continue;
+
+ info.region.memory_class = mr->type;
+ info.region.memory_instance = mr->instance;
+ info.probed_size = mr->total;
+ info.unallocated_size = mr->avail;
+
+ if (__copy_to_user(info_ptr, &info, sizeof(info)))
+ return -EFAULT;
+
+ query.num_regions++;
+ info_ptr++;
+ }
+
+ if (__copy_to_user(query_ptr, &query, sizeof(query)))
+ return -EFAULT;
+
+ return total_length;
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
query_engine_info,
query_perf_config,
+ query_memregion_info,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
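
query_memregion_info() follows the usual two-pass i915 query protocol: a first call with item.length == 0 reports the required buffer size back through the item, a second call fills the buffer. A hedged userspace sketch, assuming the DRM_I915_QUERY_MEMORY_REGIONS id and structures from the matching uAPI header:

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static struct drm_i915_query_memory_regions *query_regions(int drm_fd)
	{
		struct drm_i915_query_item item = {
			.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
		};
		struct drm_i915_query q = {
			.num_items = 1,
			.items_ptr = (uintptr_t)&item,
		};
		struct drm_i915_query_memory_regions *regions;

		/* pass 1: item.length == 0, kernel writes required size back */
		if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
			return NULL;

		regions = calloc(1, item.length);
		if (!regions)
			return NULL;
		item.data_ptr = (uintptr_t)regions;

		/* pass 2: kernel fills the buffer; item.length holds status */
		if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0) {
			free(regions);
			return NULL;
		}
		return regions;
	}
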
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cbf7a60afe54..e915ec034c98 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -416,6 +416,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+#define GEN12_HCP_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x2910)
+#define GEN12_HCP_SFC_FORCED_LOCK_BIT REG_BIT(0)
+#define GEN12_HCP_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x2914)
+#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1)
+#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0)
+
#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100)
#define GEN12_SFC_DONE_MAX 4
@@ -487,6 +493,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GAB_CTL _MMIO(0x24000)
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
+#define GU_CNTL _MMIO(0x101010)
+#define LMEM_INIT REG_BIT(7)
+
#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
@@ -2715,6 +2724,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
+#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10)
+#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1 << 31)
@@ -2929,6 +2941,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
+#define MBUS_CTL _MMIO(0x4438C)
+#define MBUS_JOIN REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12)
#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
@@ -3781,8 +3802,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define CSHRDDR3CTL_DDR3 (1 << 2)
/* 965 MCH register controlling DRAM channel configuration */
-#define C0DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x206)
-#define C1DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x606)
+#define C0DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x206)
+#define C1DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x606)
/* snb MCH registers for reading the DRAM channel configuration */
#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
@@ -4170,6 +4191,9 @@ enum {
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
+#define DPCE_GATING_DIS REG_BIT(17)
+
#define _CLKGATE_DIS_PSL_A 0x46520
#define _CLKGATE_DIS_PSL_B 0x46524
#define _CLKGATE_DIS_PSL_C 0x46528
@@ -4367,6 +4391,8 @@ enum {
#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0)
+#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x))
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
@@ -4563,8 +4589,7 @@ enum {
#define EDP_SU_TRACK_ENABLE (1 << 30)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28)
-#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
-#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */
#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
#define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8
@@ -6127,6 +6152,10 @@ enum {
#define _PIPEBGCMAX 0x71010
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
+#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
+#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(pipe, _PIPE_ARB_CTL_A)
+#define PIPE_ARB_USE_PROG_SLOTS REG_BIT(13)
+
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */
@@ -6144,12 +6173,26 @@ enum {
#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
+#define _PIPE_MISC2_A 0x7002C
+#define _PIPE_MISC2_B 0x7102C
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN (0x50 << 24)
+#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS (0x14 << 24)
+#define PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK (0xff << 24)
+#define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A)
+
/* Skylake+ pipe bottom (background) color */
#define _SKL_BOTTOM_COLOR_A 0x70034
#define SKL_BOTTOM_COLOR_GAMMA_ENABLE (1 << 31)
#define SKL_BOTTOM_COLOR_CSC_ENABLE (1 << 30)
#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A)
+#define _ICL_PIPE_A_STATUS 0x70058
+#define ICL_PIPESTATUS(pipe) _MMIO_PIPE2(pipe, _ICL_PIPE_A_STATUS)
+#define PIPE_STATUS_UNDERRUN REG_BIT(31)
+#define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28)
+#define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27)
+#define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26)
+
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
#define PIPEB_HLINE_INT_EN (1 << 28)
@@ -6418,37 +6461,61 @@ enum {
/* Watermark register definitions for SKL */
#define _CUR_WM_A_0 0x70140
#define _CUR_WM_B_0 0x71140
+#define _CUR_WM_SAGV_A 0x70158
+#define _CUR_WM_SAGV_B 0x71158
+#define _CUR_WM_SAGV_TRANS_A 0x7015C
+#define _CUR_WM_SAGV_TRANS_B 0x7115C
+#define _CUR_WM_TRANS_A 0x70168
+#define _CUR_WM_TRANS_B 0x71168
#define _PLANE_WM_1_A_0 0x70240
#define _PLANE_WM_1_B_0 0x71240
#define _PLANE_WM_2_A_0 0x70340
#define _PLANE_WM_2_B_0 0x71340
-#define _PLANE_WM_TRANS_1_A_0 0x70268
-#define _PLANE_WM_TRANS_1_B_0 0x71268
-#define _PLANE_WM_TRANS_2_A_0 0x70368
-#define _PLANE_WM_TRANS_2_B_0 0x71368
-#define _CUR_WM_TRANS_A_0 0x70168
-#define _CUR_WM_TRANS_B_0 0x71168
+#define _PLANE_WM_SAGV_1_A 0x70258
+#define _PLANE_WM_SAGV_1_B 0x71258
+#define _PLANE_WM_SAGV_2_A 0x70358
+#define _PLANE_WM_SAGV_2_B 0x71358
+#define _PLANE_WM_SAGV_TRANS_1_A 0x7025C
+#define _PLANE_WM_SAGV_TRANS_1_B 0x7125C
+#define _PLANE_WM_SAGV_TRANS_2_A 0x7035C
+#define _PLANE_WM_SAGV_TRANS_2_B 0x7135C
+#define _PLANE_WM_TRANS_1_A 0x70268
+#define _PLANE_WM_TRANS_1_B 0x71268
+#define _PLANE_WM_TRANS_2_A 0x70368
+#define _PLANE_WM_TRANS_2_B 0x71368
#define PLANE_WM_EN (1 << 31)
#define PLANE_WM_IGNORE_LINES (1 << 30)
-#define PLANE_WM_LINES_SHIFT 14
-#define PLANE_WM_LINES_MASK 0x1f
-#define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+ 11 bits */
+#define PLANE_WM_LINES_MASK REG_GENMASK(26, 14)
+#define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0)
#define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0)
#define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level)))
-#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A_0, _CUR_WM_TRANS_B_0)
-
+#define CUR_WM_SAGV(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_A, _CUR_WM_SAGV_B)
+#define CUR_WM_SAGV_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_TRANS_A, _CUR_WM_SAGV_TRANS_B)
+#define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A, _CUR_WM_TRANS_B)
#define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0)
#define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
-#define _PLANE_WM_BASE(pipe, plane) \
- _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
-#define PLANE_WM(pipe, plane, level) \
- _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
-#define _PLANE_WM_TRANS_1(pipe) \
- _PIPE(pipe, _PLANE_WM_TRANS_1_A_0, _PLANE_WM_TRANS_1_B_0)
-#define _PLANE_WM_TRANS_2(pipe) \
- _PIPE(pipe, _PLANE_WM_TRANS_2_A_0, _PLANE_WM_TRANS_2_B_0)
-#define PLANE_WM_TRANS(pipe, plane) \
+#define _PLANE_WM_BASE(pipe, plane) \
+ _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
+#define PLANE_WM(pipe, plane, level) \
+ _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
+#define _PLANE_WM_SAGV_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_1_A, _PLANE_WM_SAGV_1_B)
+#define _PLANE_WM_SAGV_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_2_A, _PLANE_WM_SAGV_2_B)
+#define PLANE_WM_SAGV(pipe, plane) \
+ _MMIO(_PLANE(plane, _PLANE_WM_SAGV_1(pipe), _PLANE_WM_SAGV_2(pipe)))
+#define _PLANE_WM_SAGV_TRANS_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_TRANS_1_A, _PLANE_WM_SAGV_TRANS_1_B)
+#define _PLANE_WM_SAGV_TRANS_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_SAGV_TRANS_2_A, _PLANE_WM_SAGV_TRANS_2_B)
+#define PLANE_WM_SAGV_TRANS(pipe, plane) \
+ _MMIO(_PLANE(plane, _PLANE_WM_SAGV_TRANS_1(pipe), _PLANE_WM_SAGV_TRANS_2(pipe)))
+#define _PLANE_WM_TRANS_1(pipe) \
+ _PIPE(pipe, _PLANE_WM_TRANS_1_A, _PLANE_WM_TRANS_1_B)
+#define _PLANE_WM_TRANS_2(pipe) \
+ _PIPE(pipe, _PLANE_WM_TRANS_2_A, _PLANE_WM_TRANS_2_B)
+#define PLANE_WM_TRANS(pipe, plane) \
_MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)))
/* define the Watermark register on Ironlake */
@@ -6553,6 +6620,8 @@ enum {
#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define MCURSOR_ARB_SLOTS_MASK REG_GENMASK(30, 28) /* icl+ */
+#define MCURSOR_ARB_SLOTS(x) REG_FIELD_PREP(MCURSOR_ARB_SLOTS_MASK, (x)) /* icl+ */
#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28)
#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
@@ -7004,6 +7073,8 @@ enum {
#define _PLANE_CTL_2_A 0x70280
#define _PLANE_CTL_3_A 0x70380
#define PLANE_CTL_ENABLE (1 << 31)
+#define PLANE_CTL_ARB_SLOTS_MASK REG_GENMASK(30, 28) /* icl+ */
+#define PLANE_CTL_ARB_SLOTS(x) REG_FIELD_PREP(PLANE_CTL_ARB_SLOTS_MASK, (x)) /* icl+ */
#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */
#define PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
/*
@@ -7207,6 +7278,8 @@ enum {
_PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
#define PLANE_STRIDE(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
+#define PLANE_STRIDE_MASK REG_GENMASK(10, 0)
+#define PLANE_STRIDE_MASK_XELPD REG_GENMASK(11, 0)
#define _PLANE_POS_1_B 0x7118c
#define _PLANE_POS_2_B 0x7128c
@@ -7265,7 +7338,7 @@ enum {
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
-#define DDB_ENTRY_MASK 0x7FF /* skl+: 10 bits, icl+ 11 bits */
+#define DDB_ENTRY_MASK 0xFFF /* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
#define DDB_ENTRY_END_SHIFT 16
#define _PLANE_BUF_CFG_1(pipe) \
_PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
@@ -7677,20 +7750,20 @@ enum {
#define GAMMA_MODE_MODE_SPLIT (3 << 0) /* ivb-bdw */
#define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */
-/* DMC/CSR */
-#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
-#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
-#define CSR_HTP_ADDR_SKL 0x00500034
-#define CSR_SSP_BASE _MMIO(0x8F074)
-#define CSR_HTP_SKL _MMIO(0x8F004)
-#define CSR_LAST_WRITE _MMIO(0x8F034)
-#define CSR_LAST_WRITE_VALUE 0xc003b400
-/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
-#define CSR_MMIO_START_RANGE 0x80000
-#define CSR_MMIO_END_RANGE 0x8FFFF
-#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030)
-#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
-#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
+/* DMC */
+#define DMC_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
+#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define DMC_HTP_ADDR_SKL 0x00500034
+#define DMC_SSP_BASE _MMIO(0x8F074)
+#define DMC_HTP_SKL _MMIO(0x8F004)
+#define DMC_LAST_WRITE _MMIO(0x8F034)
+#define DMC_LAST_WRITE_VALUE 0xc003b400
+/* MMIO address range for DMC program (0x80000 - 0x82FFF) */
+#define DMC_MMIO_START_RANGE 0x80000
+#define DMC_MMIO_END_RANGE 0x8FFFF
+#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
+#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
+#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
@@ -7784,6 +7857,8 @@ enum {
#define GEN8_GT_BCS_IRQ (1 << 1)
#define GEN8_GT_RCS_IRQ (1 << 0)
+#define XELPD_DISPLAY_ERR_FATAL_MASK _MMIO(0x4421c)
+
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
@@ -7803,6 +7878,8 @@ enum {
#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
+#define XELPD_PIPE_SOFT_UNDERRUN (1 << 22)
+#define XELPD_PIPE_HARD_UNDERRUN (1 << 21)
#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
@@ -7866,15 +7943,17 @@ enum {
#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
-#define TGL_DE_PORT_AUX_USBC6 (1 << 13)
-#define TGL_DE_PORT_AUX_USBC5 (1 << 12)
-#define TGL_DE_PORT_AUX_USBC4 (1 << 11)
-#define TGL_DE_PORT_AUX_USBC3 (1 << 10)
-#define TGL_DE_PORT_AUX_USBC2 (1 << 9)
-#define TGL_DE_PORT_AUX_USBC1 (1 << 8)
-#define TGL_DE_PORT_AUX_DDIC (1 << 2)
-#define TGL_DE_PORT_AUX_DDIB (1 << 1)
-#define TGL_DE_PORT_AUX_DDIA (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 REG_BIT(13)
+#define XELPD_DE_PORT_AUX_DDIE REG_BIT(13)
+#define TGL_DE_PORT_AUX_USBC5 REG_BIT(12)
+#define XELPD_DE_PORT_AUX_DDID REG_BIT(12)
+#define TGL_DE_PORT_AUX_USBC4 REG_BIT(11)
+#define TGL_DE_PORT_AUX_USBC3 REG_BIT(10)
+#define TGL_DE_PORT_AUX_USBC2 REG_BIT(9)
+#define TGL_DE_PORT_AUX_USBC1 REG_BIT(8)
+#define TGL_DE_PORT_AUX_DDIC REG_BIT(2)
+#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
+#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
#define GEN8_DE_MISC_ISR _MMIO(0x44460)
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
@@ -8096,13 +8175,29 @@ enum {
#define DISP_DATA_PARTITION_5_6 (1 << 6)
#define DISP_IPC_ENABLE (1 << 3)
-#define _DBUF_CTL_S1 0x45008
-#define _DBUF_CTL_S2 0x44FE8
-#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
+/*
+ * The below are numbered starting from "S1" on gen11/gen12, but starting
+ * with gen13 display, the bspec switches to a 0-based numbering scheme
+ * (although the addresses stay the same so new S0 = old S1, new S1 = old S2).
+ * We'll just use the 0-based numbering here for all platforms since it's the
+ * way things will be named by the hardware team going forward, plus it's more
+ * consistent with how most of the rest of our registers are named.
+ */
+#define _DBUF_CTL_S0 0x45008
+#define _DBUF_CTL_S1 0x44FE8
+#define _DBUF_CTL_S2 0x44300
+#define _DBUF_CTL_S3 0x44304
+#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \
+ _DBUF_CTL_S0, \
+ _DBUF_CTL_S1, \
+ _DBUF_CTL_S2, \
+ _DBUF_CTL_S3))
#define DBUF_POWER_REQUEST REG_BIT(31)
#define DBUF_POWER_STATE REG_BIT(30)
#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19)
#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
+#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */
+#define DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) /* ADL-P+ */
#define GEN7_MSG_CTL _MMIO(0x45010)
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
@@ -8287,6 +8382,7 @@ enum {
#define _PIPEC_CHICKEN 0x72038
#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
_PIPEB_CHICKEN)
+#define UNDERRUN_RECOVERY_DISABLE REG_BIT(30)
#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
@@ -9632,6 +9728,12 @@ enum {
#define ICL_PW_CTL_IDX_PW_2 1
#define ICL_PW_CTL_IDX_PW_1 0
+/* XE_LPD - power wells */
+#define XELPD_PW_CTL_IDX_PW_D 8
+#define XELPD_PW_CTL_IDX_PW_C 7
+#define XELPD_PW_CTL_IDX_PW_B 6
+#define XELPD_PW_CTL_IDX_PW_A 5
+
#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
@@ -9646,7 +9748,9 @@ enum {
#define TGL_PW_CTL_IDX_AUX_TBT1 9
#define ICL_PW_CTL_IDX_AUX_TBT1 8
#define TGL_PW_CTL_IDX_AUX_TC6 8
+#define XELPD_PW_CTL_IDX_AUX_E 8
#define TGL_PW_CTL_IDX_AUX_TC5 7
+#define XELPD_PW_CTL_IDX_AUX_D 7
#define TGL_PW_CTL_IDX_AUX_TC4 6
#define ICL_PW_CTL_IDX_AUX_F 5
#define TGL_PW_CTL_IDX_AUX_TC3 5
@@ -9661,7 +9765,9 @@ enum {
#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define XELPD_PW_CTL_IDX_DDI_E 8
#define TGL_PW_CTL_IDX_DDI_TC6 8
+#define XELPD_PW_CTL_IDX_DDI_D 7
#define TGL_PW_CTL_IDX_DDI_TC5 7
#define TGL_PW_CTL_IDX_DDI_TC4 6
#define ICL_PW_CTL_IDX_DDI_F 5
@@ -9802,7 +9908,7 @@ enum skl_power_gate {
#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
_TRANSB_HDCP_CONF)
#define HDCP_CONF(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_CONF(trans) : \
PORT_HDCP_CONF(port))
@@ -9815,7 +9921,7 @@ enum skl_power_gate {
_TRANSA_HDCP_ANINIT, \
_TRANSB_HDCP_ANINIT)
#define HDCP_ANINIT(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_ANINIT(trans) : \
PORT_HDCP_ANINIT(port))
@@ -9825,7 +9931,7 @@ enum skl_power_gate {
#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
_TRANSB_HDCP_ANLO)
#define HDCP_ANLO(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_ANLO(trans) : \
PORT_HDCP_ANLO(port))
@@ -9835,7 +9941,7 @@ enum skl_power_gate {
#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
_TRANSB_HDCP_ANHI)
#define HDCP_ANHI(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_ANHI(trans) : \
PORT_HDCP_ANHI(port))
@@ -9846,7 +9952,7 @@ enum skl_power_gate {
_TRANSA_HDCP_BKSVLO, \
_TRANSB_HDCP_BKSVLO)
#define HDCP_BKSVLO(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_BKSVLO(trans) : \
PORT_HDCP_BKSVLO(port))
@@ -9857,7 +9963,7 @@ enum skl_power_gate {
_TRANSA_HDCP_BKSVHI, \
_TRANSB_HDCP_BKSVHI)
#define HDCP_BKSVHI(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_BKSVHI(trans) : \
PORT_HDCP_BKSVHI(port))
@@ -9868,7 +9974,7 @@ enum skl_power_gate {
_TRANSA_HDCP_RPRIME, \
_TRANSB_HDCP_RPRIME)
#define HDCP_RPRIME(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_RPRIME(trans) : \
PORT_HDCP_RPRIME(port))
@@ -9879,7 +9985,7 @@ enum skl_power_gate {
_TRANSA_HDCP_STATUS, \
_TRANSB_HDCP_STATUS)
#define HDCP_STATUS(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP_STATUS(trans) : \
PORT_HDCP_STATUS(port))
@@ -9920,7 +10026,7 @@ enum skl_power_gate {
#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
#define AUTH_CLR_KEYS BIT(18)
#define HDCP2_AUTH(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_AUTH(trans) : \
PORT_HDCP2_AUTH(port))
@@ -9931,7 +10037,7 @@ enum skl_power_gate {
_TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ BIT(31)
#define HDCP2_CTL(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_CTL(trans) : \
PORT_HDCP2_CTL(port))
@@ -9945,7 +10051,7 @@ enum skl_power_gate {
#define LINK_AUTH_STATUS BIT(21)
#define LINK_ENCRYPTION_STATUS BIT(20)
#define HDCP2_STATUS(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
@@ -9967,7 +10073,7 @@ enum skl_power_gate {
#define STREAM_ENCRYPTION_STATUS BIT(31)
#define STREAM_TYPE_STATUS BIT(30)
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
PIPE_HDCP2_STREAM_STATUS(pipe))
@@ -9983,7 +10089,7 @@ enum skl_power_gate {
_TRANSB_HDCP2_AUTH_STREAM)
#define AUTH_STREAM_TYPE BIT(31)
#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
- (INTEL_GEN(dev_priv) >= 12 ? \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
TRANS_HDCP2_AUTH_STREAM(trans) : \
PORT_HDCP2_AUTH_STREAM(port))
@@ -10099,8 +10205,10 @@ enum skl_power_gate {
#define DDI_BUF_CTL_ENABLE (1 << 31)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_MASK (0xf << 24)
+#define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20)
#define DDI_BUF_PORT_REVERSAL (1 << 16)
#define DDI_BUF_IS_IDLE (1 << 7)
+#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
#define DDI_A_4_LANES (1 << 4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
@@ -10463,6 +10571,14 @@ enum skl_power_gate {
#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
_MG_PLL1_ENABLE, _MG_PLL2_ENABLE)
+/* ADL-P Type C PLL */
+#define PORTTC1_PLL_ENABLE 0x46038
+#define PORTTC2_PLL_ENABLE 0x46040
+
+#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
+ PORTTC1_PLL_ENABLE, \
+ PORTTC2_PLL_ENABLE)
+
#define _MG_REFCLKIN_CTL_PORT1 0x16892C
#define _MG_REFCLKIN_CTL_PORT2 0x16992C
#define _MG_REFCLKIN_CTL_PORT3 0x16A92C
@@ -10811,6 +10927,7 @@ enum skl_power_gate {
_DKL_TX_DPCNTL1)
#define _DKL_TX_DPCNTL2 0x2C8
+#define DKL_TX_LOADGEN_SHARING_PMD_DISABLE REG_BIT(12)
#define DKL_TX_DP20BITMODE (1 << 2)
#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
_DKL_PHY1_BASE, \
@@ -10876,6 +10993,8 @@ enum skl_power_gate {
#define BXT_DE_PLL_ENABLE _MMIO(0x46070)
#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
#define BXT_DE_PLL_LOCK (1 << 30)
+#define BXT_DE_PLL_FREQ_REQ (1 << 23)
+#define BXT_DE_PLL_FREQ_REQ_ACK (1 << 22)
#define CNL_CDCLK_PLL_RATIO(x) (x)
#define CNL_CDCLK_PLL_RATIO_MASK 0xff
@@ -11250,6 +11369,12 @@ enum skl_power_gate {
#define ICL_ESC_CLK_DIV_SHIFT 0
#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+#define _ADL_MIPIO_REG 0x180
+#define ADL_MIPIO_DW(port, dw) _MMIO(_ICL_COMBOPHY(port) + _ADL_MIPIO_REG + 4 * (dw))
+#define TX_ESC_CLK_DIV_PHY_SEL			REG_BIT(16)
+#define TX_ESC_CLK_DIV_PHY_MASK REG_GENMASK(23, 16)
+#define TX_ESC_CLK_DIV_PHY REG_FIELD_PREP(TX_ESC_CLK_DIV_PHY_MASK, 0x7f)
+
#define _DSI_CMD_FRMCTL_0 0x6b034
#define _DSI_CMD_FRMCTL_1 0x6b834
#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \
@@ -11472,6 +11597,8 @@ enum skl_power_gate {
#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
+#define UNCOMPRESSED_JOINER_MASTER (1 << 21)
+#define UNCOMPRESSED_JOINER_SLAVE (1 << 20)
#define _ICL_PIPE_DSS_CTL2_PB 0x78204
#define _ICL_PIPE_DSS_CTL2_PC 0x78404
@@ -12188,6 +12315,7 @@ enum skl_power_gate {
#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
#define GEN12_GSMBASE _MMIO(0x108100)
+#define GEN12_DSMBASE _MMIO(0x1080C0)
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
@@ -12533,6 +12661,15 @@ enum skl_power_gate {
#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+#define _TCSS_DDI_STATUS_1 0x161500
+#define _TCSS_DDI_STATUS_2 0x161504
+#define TCSS_DDI_STATUS(tc) _MMIO(_PICK_EVEN(tc, \
+ _TCSS_DDI_STATUS_1, \
+ _TCSS_DDI_STATUS_2))
+#define TCSS_DDI_STATUS_READY REG_BIT(2)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
+#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
+
/* This register controls the Display State Buffer (DSB) engines. */
#define _DSBSL_INSTANCE_BASE 0x70B00
#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
@@ -12549,4 +12686,7 @@ enum skl_power_gate {
#define TGL_ROOT_DEVICE_SKU_ULX 0x2
#define TGL_ROOT_DEVICE_SKU_ULT 0x4
+#define CLKREQ_POLICY _MMIO(0x101038)
+#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
+
#endif /* _I915_REG_H_ */
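
Given the renumbering note for DBUF_CTL_S() above, the slice indices resolve as follows (worked example; _PICK() indexes its argument list directly):

	DBUF_CTL_S(0)	/* _MMIO(0x45008), the old "S1" */
	DBUF_CTL_S(1)	/* _MMIO(0x44FE8), the old "S2" */
	DBUF_CTL_S(2)	/* _MMIO(0x44300), new third slice (ADL-P) */
	DBUF_CTL_S(3)	/* _MMIO(0x44304), new fourth slice (ADL-P) */
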
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index bec9c3652188..1014c71cf7f5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -929,7 +929,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
u32 seqno;
int ret;
- might_sleep_if(gfpflags_allow_blocking(gfp));
+ might_alloc(gfp);
/* Check that the caller provided an already pinned context */
__intel_context_pin(ce);
@@ -1176,12 +1176,12 @@ __emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
u32 seqno)
{
- const int has_token = INTEL_GEN(to->engine->i915) >= 12;
+ const int has_token = GRAPHICS_VER(to->engine->i915) >= 12;
u32 hwsp_offset;
int len, err;
u32 *cs;
- GEM_BUG_ON(INTEL_GEN(to->engine->i915) < 8);
+ GEM_BUG_ON(GRAPHICS_VER(to->engine->i915) < 8);
GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
/* We need to pin the signaler's HWSP until we are finished reading. */
@@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+ &shared);
if (ret)
return ret;
@@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_unlocked(obj->base.resv);
}
if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index cc6b3846a8c7..69e9e6c3135e 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -6,6 +6,10 @@
#include "i915_scatterlist.h"
+#include <drm/drm_mm.h>
+
+#include <linux/slab.h>
+
bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
@@ -34,6 +38,72 @@ bool i915_sg_trim(struct sg_table *orig_st)
return true;
}
+/**
+ * i915_sg_from_mm_node - Create an sg_table from a struct drm_mm_node
+ * @node: The drm_mm_node.
+ * @region_start: An offset to add to the dma addresses of the sg list.
+ *
+ * Create a struct sg_table, initializing it from a struct drm_mm_node,
+ * taking a maximum segment length into account, splitting into segments
+ * if necessary.
+ *
+ * Return: A pointer to a kmalloced struct sg_table on success, negative
+ * error code cast to an error pointer on failure.
+ */
+struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
+ u64 region_start)
+{
+ const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
+ u64 segment_pages = max_segment >> PAGE_SHIFT;
+ u64 block_size, offset, prev_end;
+ struct sg_table *st;
+ struct scatterlist *sg;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
+ GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+ prev_end = (resource_size_t)-1;
+ block_size = node->size << PAGE_SHIFT;
+ offset = node->start << PAGE_SHIFT;
+
+ while (block_size) {
+ u64 len;
+
+ if (offset != prev_end || sg->length >= max_segment) {
+ if (st->nents)
+ sg = __sg_next(sg);
+
+ sg_dma_address(sg) = region_start + offset;
+ sg_dma_len(sg) = 0;
+ sg->length = 0;
+ st->nents++;
+ }
+
+ len = min(block_size, max_segment - sg->length);
+ sg->length += len;
+ sg_dma_len(sg) += len;
+
+ offset += len;
+ block_size -= len;
+
+ prev_end = offset;
+ }
+
+ sg_mark_end(sg);
+ i915_sg_trim(st);
+
+ return st;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#endif
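
A typical consumer of the new helper, sketched; the mem->region.start offset and the surrounding error handling are illustrative of the region allocators introduced elsewhere in this series, not a specific call site:

	struct sg_table *st;

	st = i915_sg_from_mm_node(&node, mem->region.start);
	if (IS_ERR(st))
		return PTR_ERR(st);	/* -ENOMEM from table allocation */

	/* ... hand st to the GTT / dma layers ... */

	sg_free_table(st);
	kfree(st);
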
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index 9cb26a224034..5acca45ea981 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -13,6 +13,8 @@
#include "i915_gem.h"
+struct drm_mm_node;
+
/*
* Optimised SGL iterator for GEM objects
*/
@@ -101,15 +103,23 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
-static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
+/**
+ * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist
+ * @sg: The scatterlist
+ *
+ * Return: An unsigned int with segment sizes logically or'ed together.
+ * A caller can use this information to determine what hardware page table
+ * entry sizes can be used to map the memory represented by the scatterlist.
+ */
+static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
{
unsigned int page_sizes;
page_sizes = 0;
- while (sg) {
+ while (sg && sg_dma_len(sg)) {
GEM_BUG_ON(sg->offset);
- GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
- page_sizes |= sg->length;
+ GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
+ page_sizes |= sg_dma_len(sg);
sg = __sg_next(sg);
}
@@ -133,4 +143,6 @@ static inline unsigned int i915_sg_segment_size(void)
bool i915_sg_trim(struct sg_table *orig_st);
+struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
+ u64 region_start);
#endif
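
The switch from sg->length to sg_dma_len() matters once the dma segments no longer mirror the CPU pages (for instance after IOMMU coalescing), and stopping at the first zero-length dma segment honours only the mapped portion of the table. Because the result is an OR of segment lengths, a worked example:

	/* one 2 MiB dma segment and one 64 KiB dma segment:
	 *	page_sizes = SZ_2M | SZ_64K = 0x210000
	 * so the caller knows both 2M and 64K PTEs are needed, and that
	 * 2M entries alone cannot map the whole object.
	 */
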
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0bc7b49f843c..f7b55f34dba8 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -38,14 +38,14 @@ static void intel_save_swf(struct drm_i915_private *dev_priv)
int i;
/* Scratch space */
- if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
+ if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
- } else if (IS_GEN(dev_priv, 2)) {
+ } else if (GRAPHICS_VER(dev_priv) == 2) {
for (i = 0; i < 7; i++)
dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
} else if (HAS_GMCH(dev_priv)) {
@@ -63,14 +63,14 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
int i;
/* Scratch space */
- if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
+ if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
- } else if (IS_GEN(dev_priv, 2)) {
+ } else if (GRAPHICS_VER(dev_priv) == 2) {
for (i = 0; i < 7; i++)
intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
} else if (HAS_GMCH(dev_priv)) {
@@ -87,11 +87,14 @@ void i915_save_display(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
/* Display arbitration control */
- if (INTEL_GEN(dev_priv) <= 4)
+ if (GRAPHICS_VER(dev_priv) <= 4)
dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB);
- if (IS_GEN(dev_priv, 4))
+ if (GRAPHICS_VER(dev_priv) == 4)
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@@ -102,14 +105,17 @@ void i915_restore_display(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
intel_restore_swf(dev_priv);
- if (IS_GEN(dev_priv, 4))
+ if (GRAPHICS_VER(dev_priv) == 4)
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
/* Display arbitration */
- if (INTEL_GEN(dev_priv) <= 4)
+ if (GRAPHICS_VER(dev_priv) <= 4)
intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on the platform that supports FBC*/
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 2744558f3050..c589a681da77 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_unlocked(resv);
}
if (ret >= 0 && excl && excl->ops != exclude) {
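
This and the i915_request.c hunk above are pure renames on the dma-resv side: dma_resv_get_fences_rcu() becomes dma_resv_get_fences() and dma_resv_get_excl_rcu() becomes dma_resv_get_excl_unlocked(), with unchanged signatures. The shared-fence collection pattern both call sites use, sketched:

	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int ret;

	/* snapshot the exclusive + shared fences without the resv lock */
	ret = dma_resv_get_fences(resv, &excl, &count, &shared);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* ... await shared[i] ... */
		dma_fence_put(shared[i]);	/* drop the snapshot reference */
	}
	kfree(shared);
	dma_fence_put(excl);
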
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 45d32ef42787..873bf996ceb5 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -58,8 +58,8 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
-static ssize_t
-show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
+static ssize_t rc6_enable_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
unsigned int mask;
@@ -72,46 +72,46 @@ show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
if (HAS_RC6pp(dev_priv))
mask |= BIT(2);
- return snprintf(buf, PAGE_SIZE, "%x\n", mask);
+ return sysfs_emit(buf, "%x\n", mask);
}
-static ssize_t
-show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+static ssize_t rc6_residency_ms_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
- return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+ return sysfs_emit(buf, "%u\n", rc6_residency);
}
-static ssize_t
-show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+static ssize_t rc6p_residency_ms_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
- return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
+ return sysfs_emit(buf, "%u\n", rc6p_residency);
}
-static ssize_t
-show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+static ssize_t rc6pp_residency_ms_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
- return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
+ return sysfs_emit(buf, "%u\n", rc6pp_residency);
}
-static ssize_t
-show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+static ssize_t media_rc6_residency_ms_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
- return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+ return sysfs_emit(buf, "%u\n", rc6_residency);
}
-static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
-static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
-static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
-static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
-static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
+static DEVICE_ATTR_RO(rc6_enable);
+static DEVICE_ATTR_RO(rc6_residency_ms);
+static DEVICE_ATTR_RO(rc6p_residency_ms);
+static DEVICE_ATTR_RO(rc6pp_residency_ms);
+static DEVICE_ATTR_RO(media_rc6_residency_ms);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
@@ -263,8 +263,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &i915->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_rps_read_actual_frequency(rps));
+ return sysfs_emit(buf, "%d\n", intel_rps_read_actual_frequency(rps));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
@@ -273,8 +272,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &i915->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(rps, rps->cur_freq));
+ return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->cur_freq));
}
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -282,8 +280,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &i915->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(rps, rps->boost_freq));
+ return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->boost_freq));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -323,8 +320,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(rps, rps->efficient_freq));
+ return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -332,8 +328,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(rps, rps->max_freq_softlimit));
+ return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->max_freq_softlimit));
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -387,8 +382,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(rps, rps->min_freq_softlimit));
+ return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->min_freq_softlimit));
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -462,7 +456,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
else
BUG();
- return snprintf(buf, PAGE_SIZE, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", val);
}
static const struct attribute * const gen6_attrs[] = {
@@ -601,7 +595,7 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
ret = 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
- else if (INTEL_GEN(dev_priv) >= 6)
+ else if (GRAPHICS_VER(dev_priv) >= 6)
ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
if (ret)
drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
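Two kernel conventions drive the sysfs rework: DEVICE_ATTR_RO(name) wires a read-only attribute to a callback that must literally be named name##_show — hence the function renames — and sysfs_emit() caps output at PAGE_SIZE internally, so callers stop passing a length. A standalone sketch with a hypothetical attribute:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            /* sysfs_emit() verifies buf and bounds the write to PAGE_SIZE */
            return sysfs_emit(buf, "%u\n", 42);
    }
    /* Expands to dev_attr_example with mode 0444, using example_show() */
    static DEVICE_ATTR_RO(example);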
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index a4addcc64978..6778ad2a14a4 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
+#include "display/intel_crtc.h"
#include "display/intel_display_types.h"
#include "gt/intel_engine.h"
@@ -474,6 +475,44 @@ TRACE_EVENT(intel_pipe_update_end,
__entry->scanline)
);
+/* frontbuffer tracking */
+
+TRACE_EVENT(intel_frontbuffer_invalidate,
+ TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
+ TP_ARGS(frontbuffer_bits, origin),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, frontbuffer_bits)
+ __field(unsigned int, origin)
+ ),
+
+ TP_fast_assign(
+ __entry->frontbuffer_bits = frontbuffer_bits;
+ __entry->origin = origin;
+ ),
+
+ TP_printk("frontbuffer_bits=0x%08x, origin=%u",
+ __entry->frontbuffer_bits, __entry->origin)
+);
+
+TRACE_EVENT(intel_frontbuffer_flush,
+ TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
+ TP_ARGS(frontbuffer_bits, origin),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, frontbuffer_bits)
+ __field(unsigned int, origin)
+ ),
+
+ TP_fast_assign(
+ __entry->frontbuffer_bits = frontbuffer_bits;
+ __entry->origin = origin;
+ ),
+
+ TP_printk("frontbuffer_bits=0x%08x, origin=%u",
+ __entry->frontbuffer_bits, __entry->origin)
+);
+
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
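Each TRACE_EVENT() above generates a trace_<name>() call site; the actual callers live in intel_frontbuffer.c, at the top of the invalidate/flush paths. A sketch of how they are fired (function shape illustrative):

    static void frontbuffer_invalidate(struct drm_i915_private *i915,
                                       unsigned int frontbuffer_bits,
                                       enum fb_op_origin origin)
    {
            trace_intel_frontbuffer_invalidate(frontbuffer_bits, origin);
            /* ... PSR/FBC/DRRS invalidation follows ... */
    }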
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index abd4dcd9f79c..5259edacde38 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -201,6 +201,11 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
__T; \
})
+static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
+{
+ return a - b;
+}
+
/*
* container_of_user: Extract the superclass from a pointer to a member.
*
@@ -418,6 +423,11 @@ static inline const char *onoff(bool v)
return v ? "on" : "off";
}
+static inline const char *enabledisable(bool v)
+{
+ return v ? "enable" : "disable";
+}
+
static inline const char *enableddisabled(bool v)
{
return v ? "enabled" : "disabled";
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 172799277dd5..31a105bc1792 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -74,7 +74,7 @@ void intel_vgpu_detect(struct drm_i915_private *dev_priv)
* we do not support VGT on older gens; return early so we don't have
* to consider differently numbered or sized MMIO bars
*/
- if (INTEL_GEN(dev_priv) < 6)
+ if (GRAPHICS_VER(dev_priv) < 6)
return;
shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 07490db51cdc..0f227f28b280 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -27,6 +27,7 @@
#include "display/intel_frontbuffer.h"
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
@@ -93,7 +94,6 @@ static int __i915_vma_active(struct i915_active *ref)
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
-__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
@@ -124,7 +124,7 @@ vma_create(struct drm_i915_gem_object *obj,
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
+ i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
/* Declare ourselves safe for use inside shrinkers */
if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -274,7 +274,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
GEM_BUG_ON(!atomic_read(&vm->open));
spin_lock(&obj->vma.lock);
@@ -448,9 +448,11 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr;
int err;
- if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
- err = -ENODEV;
- goto err;
+ if (!i915_gem_object_is_lmem(vma->obj)) {
+ if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+ err = -ENODEV;
+ goto err;
+ }
}
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -458,9 +460,19 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
- vma->node.start,
- vma->node.size);
+ /*
+ * TODO: consider just using i915_gem_object_pin_map() for lmem
+ * instead, which already supports mapping non-contiguous chunks
+ * of pages; that way we could also drop I915_BO_ALLOC_CONTIGUOUS
+ * when allocating the object.
+ */
+ if (i915_gem_object_is_lmem(vma->obj))
+ ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
+ vma->obj->base.size);
+ else
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
+ vma->node.start,
+ vma->node.size);
if (ptr == NULL) {
err = -ENOMEM;
goto err;
@@ -788,32 +800,37 @@ unpinned:
static int vma_get_pages(struct i915_vma *vma)
{
int err = 0;
+ bool pinned_pages = false;
if (atomic_add_unless(&vma->pages_count, 1, 0))
return 0;
+ if (vma->obj) {
+ err = i915_gem_object_pin_pages(vma->obj);
+ if (err)
+ return err;
+ pinned_pages = true;
+ }
+
/* Allocations ahoy! */
- if (mutex_lock_interruptible(&vma->pages_mutex))
- return -EINTR;
+ if (mutex_lock_interruptible(&vma->pages_mutex)) {
+ err = -EINTR;
+ goto unpin;
+ }
if (!atomic_read(&vma->pages_count)) {
- if (vma->obj) {
- err = i915_gem_object_pin_pages(vma->obj);
- if (err)
- goto unlock;
- }
-
err = vma->ops->set_pages(vma);
- if (err) {
- if (vma->obj)
- i915_gem_object_unpin_pages(vma->obj);
+ if (err)
goto unlock;
- }
+ pinned_pages = false;
}
atomic_inc(&vma->pages_count);
unlock:
mutex_unlock(&vma->pages_mutex);
+unpin:
+ if (pinned_pages)
+ __i915_gem_object_unpin_pages(vma->obj);
return err;
}
@@ -905,8 +922,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
goto err_fence;
- err = i915_vm_pin_pt_stash(vma->vm,
- &work->stash);
+ err = i915_vm_map_pt_stash(vma->vm, &work->stash);
if (err)
goto err_fence;
}
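The vma_get_pages() rework pins the object's backing pages before taking pages_mutex, so set_pages() never allocates or takes object locks under the mutex; if another thread already populated the pages, or set_pages() keeps the pin for itself, the speculative pin is dropped on the unpin: path. The new ordering, as a comment-only sketch:

    /*
     * i915_gem_object_pin_pages(obj);  // may allocate / take obj locks
     * mutex_lock(&vma->pages_mutex);   // now only guards pages_count
     * vma->ops->set_pages(vma);        // consumes the pin on success
     * mutex_unlock(&vma->pages_mutex);
     * if (pin still ours)              // lost the race, or set_pages failed
     *         __i915_gem_object_unpin_pages(obj);
     */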
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8df784a026d2..eca452a9851f 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -68,6 +68,11 @@ static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
+static inline bool i915_vma_is_dpt(const struct i915_vma *vma)
+{
+ return i915_is_dpt(vma->vm);
+}
+
static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
@@ -146,11 +151,6 @@ static inline void i915_vma_put(struct i915_vma *vma)
i915_gem_object_put(vma->obj);
}
-static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
-{
- return a - b;
-}
-
static inline long
i915_vma_compare(struct i915_vma *vma,
struct i915_address_space *vm,
@@ -158,7 +158,7 @@ i915_vma_compare(struct i915_vma *vma,
{
ptrdiff_t cmp;
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
cmp = ptrdiff(vma->vm, vm);
if (cmp)
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 6b1bfa230b82..995b502d7e5d 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -286,4 +286,3 @@ struct i915_vma {
};
#endif
-
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index de02207f6ec6..7eaa92fee421 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -67,6 +67,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(ROCKETLAKE),
PLATFORM_NAME(DG1),
PLATFORM_NAME(ALDERLAKE_S),
+ PLATFORM_NAME(ALDERLAKE_P),
};
#undef PLATFORM_NAME
@@ -95,7 +96,9 @@ static const char *iommu_name(void)
void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p)
{
- drm_printf(p, "gen: %d\n", info->gen);
+ drm_printf(p, "graphics_ver: %u\n", info->graphics_ver);
+ drm_printf(p, "media_ver: %u\n", info->media_ver);
+ drm_printf(p, "display_ver: %u\n", info->display.ver);
drm_printf(p, "gt: %d\n", info->gt);
drm_printf(p, "iommu: %s\n", iommu_name());
drm_printf(p, "memory-regions: %x\n", info->memory_regions);
@@ -254,10 +257,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
for_each_pipe(dev_priv, pipe)
runtime->num_scalers[pipe] = 0;
- else if (INTEL_GEN(dev_priv) >= 10) {
+ else if (GRAPHICS_VER(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
runtime->num_scalers[pipe] = 2;
- } else if (IS_GEN(dev_priv, 9)) {
+ } else if (GRAPHICS_VER(dev_priv) == 9) {
runtime->num_scalers[PIPE_A] = 2;
runtime->num_scalers[PIPE_B] = 2;
runtime->num_scalers[PIPE_C] = 1;
@@ -265,13 +268,13 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
- if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 4;
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (GRAPHICS_VER(dev_priv) >= 11)
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 6;
- else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
+ else if (GRAPHICS_VER(dev_priv) == 10 || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
@@ -290,12 +293,12 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 2;
- } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
+ } else if (GRAPHICS_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) {
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 1;
}
- if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
+ if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP);
@@ -322,7 +325,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->pipe_mask &= ~BIT(PIPE_C);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
+ } else if (HAS_DISPLAY(dev_priv) && GRAPHICS_VER(dev_priv) >= 9) {
u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
@@ -337,7 +340,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->pipe_mask &= ~BIT(PIPE_C);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- if (INTEL_GEN(dev_priv) >= 12 &&
+ if (GRAPHICS_VER(dev_priv) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
info->pipe_mask &= ~BIT(PIPE_D);
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -349,15 +352,15 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
info->display.has_fbc = 0;
- if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
- info->display.has_csr = 0;
+ if (GRAPHICS_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ info->display.has_dmc = 0;
- if (INTEL_GEN(dev_priv) >= 10 &&
+ if (GRAPHICS_VER(dev_priv) >= 10 &&
(dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
info->display.has_dsc = 0;
}
- if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
+ if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active()) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
info->ppgtt_type = INTEL_PPGTT_NONE;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 2f442d418a15..b326aff65cd6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -87,6 +87,7 @@ enum intel_platform {
INTEL_ROCKETLAKE,
INTEL_DG1,
INTEL_ALDERLAKE_S,
+ INTEL_ALDERLAKE_P,
INTEL_MAX_PLATFORMS
};
@@ -140,7 +141,7 @@ enum intel_ppgtt_type {
#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
/* Keep in alphabetical order */ \
func(cursor_needs_physical); \
- func(has_csr); \
+ func(has_dmc); \
func(has_ddi); \
func(has_dp_mst); \
func(has_dsb); \
@@ -160,9 +161,9 @@ enum intel_ppgtt_type {
func(supports_tv);
struct intel_device_info {
- u16 gen_mask;
+ u8 graphics_ver;
+ u8 media_ver;
- u8 gen;
u8 gt; /* GT number, 0 if undefined */
intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
@@ -184,20 +185,24 @@ struct intel_device_info {
u8 abox_mask;
+ u8 has_cdclk_crawl; /* supports CDCLK crawling */
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
struct {
- u8 version;
+ u8 ver;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
} display;
- u16 ddb_size; /* in blocks */
- u8 num_supported_dbuf_slices; /* number of DBuf slices */
+ struct {
+ u16 size; /* in blocks */
+ u8 slice_mask;
+ } dbuf;
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
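Folding ddb_size and num_supported_dbuf_slices into a dbuf sub-struct keyed by slice_mask turns the slice bookkeeping into mask arithmetic; the slice count falls out of hweight8(), exactly as intel_pm.c does below. Sketch:

    /* Per-slice size derived from the new dbuf info (see intel_pm.c). */
    static int dbuf_slice_size(const struct intel_device_info *info)
    {
            return info->dbuf.size / hweight8(info->dbuf.slice_mask);
    }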
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 1e53c017c30d..50fdea84ba70 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -121,7 +121,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915,
struct dram_dimm_info *dimm,
int channel, char dimm_name, u16 val)
{
- if (INTEL_GEN(i915) >= 10) {
+ if (GRAPHICS_VER(i915) >= 10) {
dimm->size = cnl_get_dimm_size(val);
dimm->width = cnl_get_dimm_width(val);
dimm->ranks = cnl_get_dimm_ranks(val);
@@ -422,7 +422,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- if (IS_GEN(dev_priv, 12)) {
+ if (GRAPHICS_VER(dev_priv) == 12) {
switch (val & 0xf) {
case 0:
dram_info->type = INTEL_DRAM_DDR4;
@@ -501,12 +501,12 @@ void intel_dram_detect(struct drm_i915_private *i915)
*/
dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
- if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
+ if (GRAPHICS_VER(i915) < 9 || !HAS_DISPLAY(i915))
return;
- if (INTEL_GEN(i915) >= 12)
+ if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915);
- else if (INTEL_GEN(i915) >= 11)
+ else if (GRAPHICS_VER(i915) >= 11)
ret = gen11_get_dram_info(i915);
else if (IS_GEN9_LP(i915))
ret = bxt_get_dram_info(i915);
@@ -535,7 +535,7 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
{
u32 edram_cap = 0;
- if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
return;
edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
@@ -549,7 +549,7 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
* The capability bits needed for the size calculation are not present
* before gen9, so always return 128MB.
*/
- if (INTEL_GEN(i915) < 9)
+ if (GRAPHICS_VER(i915) < 9)
i915->edram_size_mb = 128;
else
i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index bf837b6bb185..e6024eb7cca4 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -22,162 +22,102 @@ static const struct {
.class = INTEL_MEMORY_STOLEN_SYSTEM,
.instance = 0,
},
+ [INTEL_REGION_STOLEN_LMEM] = {
+ .class = INTEL_MEMORY_STOLEN_LOCAL,
+ .instance = 0,
+ },
+};
+
+struct intel_region_reserve {
+ struct list_head link;
+ struct ttm_resource *res;
};
struct intel_memory_region *
-intel_memory_region_by_type(struct drm_i915_private *i915,
- enum intel_memory_type mem_type)
+intel_memory_region_lookup(struct drm_i915_private *i915,
+ u16 class, u16 instance)
{
struct intel_memory_region *mr;
int id;
- for_each_memory_region(mr, i915, id)
- if (mr->type == mem_type)
+ /* XXX: consider converting to an rb tree at some point */
+ for_each_memory_region(mr, i915, id) {
+ if (mr->type == class && mr->instance == instance)
return mr;
-
- return NULL;
-}
-
-static u64
-intel_memory_region_free_pages(struct intel_memory_region *mem,
- struct list_head *blocks)
-{
- struct i915_buddy_block *block, *on;
- u64 size = 0;
-
- list_for_each_entry_safe(block, on, blocks, link) {
- size += i915_buddy_block_size(&mem->mm, block);
- i915_buddy_free(&mem->mm, block);
}
- INIT_LIST_HEAD(blocks);
- return size;
+ return NULL;
}
-void
-__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
- struct list_head *blocks)
+struct intel_memory_region *
+intel_memory_region_by_type(struct drm_i915_private *i915,
+ enum intel_memory_type mem_type)
{
- mutex_lock(&mem->mm_lock);
- mem->avail += intel_memory_region_free_pages(mem, blocks);
- mutex_unlock(&mem->mm_lock);
-}
+ struct intel_memory_region *mr;
+ int id;
-void
-__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
-{
- struct list_head blocks;
+ for_each_memory_region(mr, i915, id)
+ if (mr->type == mem_type)
+ return mr;
- INIT_LIST_HEAD(&blocks);
- list_add(&block->link, &blocks);
- __intel_memory_region_put_pages_buddy(block->private, &blocks);
+ return NULL;
}
-int
-__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags,
- struct list_head *blocks)
+/**
+ * intel_memory_region_unreserve - Unreserve all previously reserved
+ * ranges
+ * @mem: The region containing the reserved ranges.
+ */
+void intel_memory_region_unreserve(struct intel_memory_region *mem)
{
- unsigned int min_order = 0;
- unsigned long n_pages;
-
- GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
- GEM_BUG_ON(!list_empty(blocks));
-
- if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
- min_order = ilog2(mem->min_page_size) -
- ilog2(mem->mm.chunk_size);
- }
+ struct intel_region_reserve *reserve, *next;
- if (flags & I915_ALLOC_CONTIGUOUS) {
- size = roundup_pow_of_two(size);
- min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
- }
-
- if (size > mem->mm.size)
- return -E2BIG;
-
- n_pages = size >> ilog2(mem->mm.chunk_size);
+ if (!mem->priv_ops || !mem->priv_ops->free)
+ return;
mutex_lock(&mem->mm_lock);
-
- do {
- struct i915_buddy_block *block;
- unsigned int order;
-
- order = fls(n_pages) - 1;
- GEM_BUG_ON(order > mem->mm.max_order);
- GEM_BUG_ON(order < min_order);
-
- do {
- block = i915_buddy_alloc(&mem->mm, order);
- if (!IS_ERR(block))
- break;
-
- if (order-- == min_order)
- goto err_free_blocks;
- } while (1);
-
- n_pages -= BIT(order);
-
- block->private = mem;
- list_add_tail(&block->link, blocks);
-
- if (!n_pages)
- break;
- } while (1);
-
- mem->avail -= size;
- mutex_unlock(&mem->mm_lock);
- return 0;
-
-err_free_blocks:
- intel_memory_region_free_pages(mem, blocks);
+ list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
+ list_del(&reserve->link);
+ mem->priv_ops->free(mem, reserve->res);
+ kfree(reserve);
+ }
mutex_unlock(&mem->mm_lock);
- return -ENXIO;
}
-struct i915_buddy_block *
-__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+/**
+ * intel_memory_region_reserve - Reserve a memory range
+ * @mem: The region for which we want to reserve a range.
+ * @offset: Start of the range to reserve.
+ * @size: The size of the range to reserve.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size)
{
- struct i915_buddy_block *block;
- LIST_HEAD(blocks);
int ret;
+ struct intel_region_reserve *reserve;
- ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
- if (ret)
- return ERR_PTR(ret);
-
- block = list_first_entry(&blocks, typeof(*block), link);
- list_del_init(&block->link);
- return block;
-}
-
-int intel_memory_region_init_buddy(struct intel_memory_region *mem)
-{
- return i915_buddy_init(&mem->mm, resource_size(&mem->region),
- PAGE_SIZE);
-}
+ if (!mem->priv_ops || !mem->priv_ops->reserve)
+ return -EINVAL;
-void intel_memory_region_release_buddy(struct intel_memory_region *mem)
-{
- i915_buddy_free_list(&mem->mm, &mem->reserved);
- i915_buddy_fini(&mem->mm);
-}
+ reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
+ if (!reserve)
+ return -ENOMEM;
-int intel_memory_region_reserve(struct intel_memory_region *mem,
- u64 offset, u64 size)
-{
- int ret;
+ reserve->res = mem->priv_ops->reserve(mem, offset, size);
+ if (IS_ERR(reserve->res)) {
+ ret = PTR_ERR(reserve->res);
+ kfree(reserve);
+ return ret;
+ }
mutex_lock(&mem->mm_lock);
- ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
+ list_add_tail(&reserve->link, &mem->reserved);
mutex_unlock(&mem->mm_lock);
- return ret;
+ return 0;
}
struct intel_memory_region *
@@ -186,6 +126,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t size,
resource_size_t min_page_size,
resource_size_t io_start,
+ u16 type,
+ u16 instance,
const struct intel_memory_region_ops *ops)
{
struct intel_memory_region *mem;
@@ -202,6 +144,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->ops = ops;
mem->total = size;
mem->avail = mem->total;
+ mem->type = type;
+ mem->instance = instance;
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
@@ -239,6 +183,7 @@ static void __intel_memory_region_destroy(struct kref *kref)
struct intel_memory_region *mem =
container_of(kref, typeof(*mem), kref);
+ intel_memory_region_unreserve(mem);
if (mem->ops->release)
mem->ops->release(mem);
@@ -276,10 +221,17 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
instance = intel_region_map[i].instance;
switch (type) {
case INTEL_MEMORY_SYSTEM:
- mem = i915_gem_shmem_setup(i915);
+ mem = i915_gem_shmem_setup(i915, type, instance);
+ break;
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ mem = i915_gem_stolen_lmem_setup(i915, type, instance);
+ if (!IS_ERR(mem))
+ i915->mm.stolen_region = mem;
break;
case INTEL_MEMORY_STOLEN_SYSTEM:
- mem = i915_gem_stolen_setup(i915);
+ mem = i915_gem_stolen_smem_setup(i915, type, instance);
+ if (!IS_ERR(mem))
+ i915->mm.stolen_region = mem;
break;
default:
continue;
@@ -294,9 +246,6 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
}
mem->id = i;
- mem->type = type;
- mem->instance = instance;
-
i915->mm.regions[i] = mem;
}
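With the buddy allocator gone, intel_memory_region_reserve() routes through the region-private ops (TTM-backed) and records each reservation so they can be returned when the region is destroyed. A caller sketch with a hypothetical range:

    int err;

    /* Carve out the first 1 MiB of the region (illustrative offset/size). */
    err = intel_memory_region_reserve(mem, 0, SZ_1M);
    if (err)
            drm_err(&i915->drm, "reservation failed: %d\n", err);
    /* No explicit free is needed: __intel_memory_region_destroy() calls
     * intel_memory_region_unreserve() on all recorded ranges. */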
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index edd49067c8ca..1f7dac63abb7 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -11,33 +11,34 @@
#include <linux/mutex.h>
#include <linux/io-mapping.h>
#include <drm/drm_mm.h>
-
-#include "i915_buddy.h"
+#include <drm/i915_drm.h>
struct drm_i915_private;
struct drm_i915_gem_object;
struct intel_memory_region;
struct sg_table;
+struct ttm_resource;
-/**
- * Base memory type
- */
enum intel_memory_type {
- INTEL_MEMORY_SYSTEM = 0,
- INTEL_MEMORY_LOCAL,
+ INTEL_MEMORY_SYSTEM = I915_MEMORY_CLASS_SYSTEM,
+ INTEL_MEMORY_LOCAL = I915_MEMORY_CLASS_DEVICE,
INTEL_MEMORY_STOLEN_SYSTEM,
+ INTEL_MEMORY_STOLEN_LOCAL,
+ INTEL_MEMORY_MOCK,
};
enum intel_region_id {
INTEL_REGION_SMEM = 0,
INTEL_REGION_LMEM,
INTEL_REGION_STOLEN_SMEM,
+ INTEL_REGION_STOLEN_LMEM,
INTEL_REGION_UNKNOWN, /* Should be last */
};
#define REGION_SMEM BIT(INTEL_REGION_SMEM)
#define REGION_LMEM BIT(INTEL_REGION_LMEM)
#define REGION_STOLEN_SMEM BIT(INTEL_REGION_STOLEN_SMEM)
+#define REGION_STOLEN_LMEM BIT(INTEL_REGION_STOLEN_LMEM)
#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
#define I915_ALLOC_CONTIGUOUS BIT(1)
@@ -58,10 +59,19 @@ struct intel_memory_region_ops {
unsigned int flags);
};
+struct intel_memory_region_private_ops {
+ struct ttm_resource *(*reserve)(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size);
+ void (*free)(struct intel_memory_region *mem,
+ struct ttm_resource *res);
+};
+
struct intel_memory_region {
struct drm_i915_private *i915;
const struct intel_memory_region_ops *ops;
+ const struct intel_memory_region_private_ops *priv_ops;
struct io_mapping iomap;
struct resource region;
@@ -69,7 +79,6 @@ struct intel_memory_region {
/* For fake LMEM */
struct drm_mm_node fake_mappable;
- struct i915_buddy_mm mm;
struct mutex mm_lock;
struct kref kref;
@@ -82,7 +91,8 @@ struct intel_memory_region {
u16 type;
u16 instance;
enum intel_region_id id;
- char name[8];
+ char name[16];
+ bool private; /* not for userspace */
struct list_head reserved;
@@ -93,25 +103,17 @@ struct intel_memory_region {
struct list_head list;
struct list_head purgeable;
} objects;
-};
-int intel_memory_region_init_buddy(struct intel_memory_region *mem);
-void intel_memory_region_release_buddy(struct intel_memory_region *mem);
-
-int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags,
- struct list_head *blocks);
-struct i915_buddy_block *
-__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
-void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
- struct list_head *blocks);
-void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
+ size_t chunk_size;
+ unsigned int max_order;
+ bool is_range_manager;
-int intel_memory_region_reserve(struct intel_memory_region *mem,
- u64 offset, u64 size);
+ void *region_private;
+};
+
+struct intel_memory_region *
+intel_memory_region_lookup(struct drm_i915_private *i915,
+ u16 class, u16 instance);
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
@@ -119,6 +121,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t size,
resource_size_t min_page_size,
resource_size_t io_start,
+ u16 type,
+ u16 instance,
const struct intel_memory_region_ops *ops);
struct intel_memory_region *
@@ -135,4 +139,9 @@ __printf(2, 3) void
intel_memory_region_set_name(struct intel_memory_region *mem,
const char *fmt, ...);
+void intel_memory_region_unreserve(struct intel_memory_region *mem);
+
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size);
#endif
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 7476f0e063c6..4e92ae19189e 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -13,17 +13,17 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_GEN(dev_priv, 5));
+ drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 5);
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
- !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
- !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
@@ -130,8 +130,10 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
return PCH_JSP;
case INTEL_PCH_ADP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
+ !IS_ALDERLAKE_P(dev_priv));
return PCH_ADP;
default:
return PCH_NONE;
@@ -161,7 +163,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
- if (IS_ALDERLAKE_S(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
@@ -179,9 +181,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ else if (GRAPHICS_VER(dev_priv) == 6 || IS_IVYBRIDGE(dev_priv))
id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 5))
+ else if (GRAPHICS_VER(dev_priv) == 5)
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 7318377503b0..e2f3f30c6445 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -55,6 +55,7 @@ enum intel_pch {
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
+#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0e2501b7fc27..45fefa0ed160 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -35,6 +35,7 @@
#include "display/intel_atomic.h"
#include "display/intel_atomic_plane.h"
#include "display/intel_bw.h"
+#include "display/intel_de.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
@@ -2339,7 +2340,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
- else if (!IS_DISPLAY_VER(dev_priv, 2))
+ else if (DISPLAY_VER(dev_priv) != 2)
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
@@ -2353,7 +2354,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2368,7 +2369,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
wm_info = &i830_bc_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
@@ -2380,7 +2381,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_DISPLAY_VER(dev_priv, 2))
+ if (DISPLAY_VER(dev_priv) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2967,7 +2968,7 @@ static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_DISPLAY_VER(dev_priv, 5))
+ if (DISPLAY_VER(dev_priv) == 5)
wm[0] = 13;
}
@@ -2975,14 +2976,16 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_DISPLAY_VER(dev_priv, 5))
+ if (DISPLAY_VER(dev_priv) == 5)
wm[0] = 13;
}
int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
/* how many WM levels are we expecting */
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (HAS_HW_SAGV_WM(dev_priv))
+ return 5;
+ else if (DISPLAY_VER(dev_priv) >= 9)
return 7;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 4;
@@ -3105,7 +3108,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_DISPLAY_VER(dev_priv, 6)) {
+ if (DISPLAY_VER(dev_priv) == 6) {
snb_wm_latency_quirk(dev_priv);
snb_wm_lp3_irq_quirk(dev_priv);
}
@@ -3354,7 +3357,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_DISPLAY_VER(dev_priv, 5) && !merged->fbc_wm_enabled &&
+ if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3636,16 +3639,16 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
{
- int i;
- int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
- u8 enabled_slices_mask = 0;
+ u8 enabled_slices = 0;
+ enum dbuf_slice slice;
- for (i = 0; i < max_slices; i++) {
- if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE)
- enabled_slices_mask |= BIT(i);
+ for_each_dbuf_slice(dev_priv, slice) {
+ if (intel_uncore_read(&dev_priv->uncore,
+ DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ enabled_slices |= BIT(slice);
}
- return enabled_slices_mask;
+ return enabled_slices;
}
/*
@@ -3654,13 +3657,13 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
*/
static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
{
- return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
+ return DISPLAY_VER(dev_priv) == 9;
}
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- return (IS_GEN9_BC(dev_priv) || DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) &&
+ return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
@@ -3680,13 +3683,13 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
}
drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
- } else if (IS_DISPLAY_VER(dev_priv, 11)) {
+ } else if (DISPLAY_VER(dev_priv) == 11) {
dev_priv->sagv_block_time_us = 10;
return;
- } else if (IS_DISPLAY_VER(dev_priv, 10)) {
+ } else if (DISPLAY_VER(dev_priv) == 10) {
dev_priv->sagv_block_time_us = 20;
return;
- } else if (IS_DISPLAY_VER(dev_priv, 9)) {
+ } else if (DISPLAY_VER(dev_priv) == 9) {
dev_priv->sagv_block_time_us = 30;
return;
} else {
@@ -4010,8 +4013,9 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
* latter from the plane commit hooks (especially in the legacy
* cursor case)
*/
- pipe_wm->use_sagv_wm = DISPLAY_VER(dev_priv) >= 12 &&
- intel_can_enable_sagv(dev_priv, new_bw_state);
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) &&
+ DISPLAY_VER(dev_priv) >= 12 &&
+ intel_can_enable_sagv(dev_priv, new_bw_state);
}
if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
@@ -4028,22 +4032,10 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return 0;
}
-static int intel_dbuf_size(struct drm_i915_private *dev_priv)
-{
- int ddb_size = INTEL_INFO(dev_priv)->ddb_size;
-
- drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
-
- if (DISPLAY_VER(dev_priv) < 11)
- return ddb_size - 4; /* 4 blocks for bypass path allocation */
-
- return ddb_size;
-}
-
static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
- return intel_dbuf_size(dev_priv) /
- INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ return INTEL_INFO(dev_priv)->dbuf.size /
+ hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
}
static void
@@ -4062,18 +4054,29 @@ skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > intel_dbuf_size(dev_priv));
+ WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
+}
+
+static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+{
+ struct skl_ddb_entry ddb;
+
+ if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
+ slice_mask = BIT(DBUF_S1);
+ else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
+ slice_mask = BIT(DBUF_S3);
+
+ skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+
+ return ddb.start;
}
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
- u32 slice_mask = 0;
- u16 ddb_size = intel_dbuf_size(dev_priv);
- u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
- u16 slice_size = ddb_size / num_supported_slices;
- u16 start_slice;
- u16 end_slice;
+ int slice_size = intel_dbuf_slice_size(dev_priv);
+ enum dbuf_slice start_slice, end_slice;
+ u8 slice_mask = 0;
if (!skl_ddb_entry_size(entry))
return 0;
@@ -4160,6 +4163,7 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
struct intel_crtc_state *crtc_state;
struct skl_ddb_entry ddb_slices;
enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset = 0;
u32 ddb_range_size;
u32 dbuf_slice_mask;
u32 start, end;
@@ -4174,6 +4178,7 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
dbuf_slice_mask = new_dbuf_state->slices[pipe];
skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask);
ddb_range_size = skl_ddb_entry_size(&ddb_slices);
intel_crtc_dbuf_weights(new_dbuf_state, pipe,
@@ -4182,11 +4187,11 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
start = ddb_range_size * weight_start / weight_total;
end = ddb_range_size * weight_end / weight_total;
- new_dbuf_state->ddb[pipe].start = ddb_slices.start + start;
- new_dbuf_state->ddb[pipe].end = ddb_slices.start + end;
-
+ new_dbuf_state->ddb[pipe].start = ddb_slices.start - mbus_offset + start;
+ new_dbuf_state->ddb[pipe].end = ddb_slices.start - mbus_offset + end;
out:
- if (skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
+ skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
&new_dbuf_state->ddb[pipe]))
return 0;
@@ -4198,7 +4203,12 @@ out:
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- crtc_state->wm.skl.ddb = new_dbuf_state->ddb[pipe];
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
@@ -4256,7 +4266,6 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
struct skl_ddb_entry *entry, u32 reg)
{
-
entry->start = reg & DDB_ENTRY_MASK;
entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
@@ -4381,6 +4390,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
struct dbuf_slice_conf_entry {
u8 active_pipes;
u8 dbuf_mask[I915_MAX_PIPES];
+ bool join_mbus;
};
/*
@@ -4569,6 +4579,137 @@ static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
{}
};
+static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static bool check_mbus_joined(u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].join_mbus;
+ }
+ return false;
+}
+
+static bool adlp_check_mbus_joined(u8 active_pipes)
+{
+ return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
+}
+
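check_mbus_joined() relies on the sentinel-terminated table above: the trailing {} entry has active_pipes == 0, which ends the walk (the same convention compute_dbuf_slices() uses for the per-platform tables). Usage sketch:

    u8 active_pipes = BIT(PIPE_A);  /* pipe A alone */
    /* true: the single-pipe-A entry in adlp_allowed_dbufs sets .join_mbus */
    bool joined = adlp_check_mbus_joined(active_pipes);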
static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
const struct dbuf_slice_conf_entry *dbuf_slices)
{
@@ -4608,14 +4749,21 @@ static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+ return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+}
+
static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (IS_DISPLAY_VER(dev_priv, 12))
+ if (IS_ALDERLAKE_P(dev_priv))
+ return adlp_compute_dbuf_slices(pipe, active_pipes);
+ else if (DISPLAY_VER(dev_priv) == 12)
return tgl_compute_dbuf_slices(pipe, active_pipes);
- else if (IS_DISPLAY_VER(dev_priv, 11))
+ else if (DISPLAY_VER(dev_priv) == 11)
return icl_compute_dbuf_slices(pipe, active_pipes);
/*
* For anything else just return one slice for now.
@@ -4986,7 +5134,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
* Wa_1408961008:icl, ehl
* Underruns with WM1+ disabled
*/
- if (IS_DISPLAY_VER(dev_priv, 11) &&
+ if (DISPLAY_VER(dev_priv) == 11 &&
level == 1 && wm->wm[0].enable) {
wm->wm[level].blocks = wm->wm[0].blocks;
wm->wm[level].lines = wm->wm[0].lines;
@@ -5199,6 +5347,14 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
return level > 0;
}
+static int skl_wm_max_lines(struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 13)
+ return 255;
+ else
+ return 31;
+}
+
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
unsigned int latency,
@@ -5245,7 +5401,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (IS_DISPLAY_VER(dev_priv, 9))
+ if (DISPLAY_VER(dev_priv) == 9)
selected_result = min_fixed16(method1, method2);
else
selected_result = method2;
@@ -5258,7 +5414,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) == 9) {
/* Display WA #1125: skl,bxt,kbl */
if (level == 0 && wp->rc_surface)
blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
@@ -5303,7 +5459,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
if (!skl_wm_has_lines(dev_priv, level))
lines = 0;
- if (lines > 31) {
+ if (lines > skl_wm_max_lines(dev_priv)) {
/* reject it */
result->min_ddb_alloc = U16_MAX;
return;
@@ -5375,7 +5531,7 @@ static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
* WaDisableTWM:skl,kbl,cfl,bxt
* Transition WM are not recommended by HW team for GEN9
*/
- if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
+ if (DISPLAY_VER(dev_priv) == 9)
return;
if (DISPLAY_VER(dev_priv) >= 11)
@@ -5384,7 +5540,7 @@ static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
trans_min = 14;
/* Display WA #1140: glk,cnl */
- if (IS_DISPLAY_VER(dev_priv, 10))
+ if (DISPLAY_VER(dev_priv) == 10)
trans_amount = 0;
else
trans_amount = 10; /* This is configurable amount */
@@ -5599,7 +5755,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
if (level->ignore_lines)
val |= PLANE_WM_IGNORE_LINES;
val |= level->blocks;
- val |= level->lines << PLANE_WM_LINES_SHIFT;
+ val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
intel_de_write_fw(dev_priv, reg, val);
}
@@ -5625,6 +5781,13 @@ void skl_write_plane_wm(struct intel_plane *plane,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
skl_plane_trans_wm(pipe_wm, plane_id));
+ if (HAS_HW_SAGV_WM(dev_priv)) {
+ skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
+ &wm->sagv.wm0);
+ skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
+ &wm->sagv.trans_wm);
+ }
+
if (DISPLAY_VER(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, plane_id), ddb_y);
@@ -5658,6 +5821,15 @@ void skl_write_cursor_wm(struct intel_plane *plane,
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
skl_plane_trans_wm(pipe_wm, plane_id));
+ if (HAS_HW_SAGV_WM(dev_priv)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe),
+ &wm->sagv.wm0);
+ skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe),
+ &wm->sagv.trans_wm);
+ }
+
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
@@ -5819,16 +5991,29 @@ skl_compute_ddb(struct intel_atomic_state *state)
new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ if (IS_ALDERLAKE_P(dev_priv))
+ new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
+ old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
if (ret)
return ret;
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ /* TODO: Implement vblank synchronized MBUS joining changes */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+ }
+
drm_dbg_kms(&dev_priv->drm,
- "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
+ "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+ INTEL_INFO(dev_priv)->dbuf.slice_mask,
+ yesno(old_dbuf_state->joined_mbus),
+ yesno(new_dbuf_state->joined_mbus));
}
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -6022,6 +6207,15 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
return false;
}
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
+ const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
+
+ if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
+ !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
+ return false;
+ }
+
return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
skl_plane_trans_wm(new_pipe_wm, plane->id));
}
@@ -6207,8 +6401,7 @@ static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
level->enable = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->blocks = val & PLANE_WM_BLOCKS_MASK;
- level->lines = (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
+ level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
}
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
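REG_FIELD_PREP()/REG_FIELD_GET() are i915's compile-time-checked wrappers around FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which derive the shift from the mask so the open-coded PLANE_WM_LINES_SHIFT can go away. Round-trip sketch with a hypothetical mask:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define EXAMPLE_LINES_MASK GENMASK(12, 8)  /* illustrative field */

    static void example_pack_unpack(void)
    {
            u32 val = FIELD_PREP(EXAMPLE_LINES_MASK, 5);       /* pack lines = 5 */
            WARN_ON(FIELD_GET(EXAMPLE_LINES_MASK, val) != 5);  /* unpack */
    }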
@@ -6241,7 +6434,25 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->trans_wm);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (HAS_HW_SAGV_WM(dev_priv)) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&dev_priv->uncore,
+ PLANE_WM_SAGV(pipe, plane_id));
+ else
+ val = intel_uncore_read(&dev_priv->uncore,
+ CUR_WM_SAGV(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&dev_priv->uncore,
+ PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&dev_priv->uncore,
+ CUR_WM_SAGV_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
wm->sagv.wm0 = wm->wm[0];
wm->sagv.trans_wm = wm->trans_wm;
}
@@ -6254,10 +6465,14 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc *crtc;
+ if (IS_ALDERLAKE_P(dev_priv))
+ dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
+
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset;
enum plane_id plane_id;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
@@ -6283,13 +6498,20 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
- crtc_state->wm.skl.ddb = dbuf_state->ddb[pipe];
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+ crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
- dbuf_state->ddb[pipe].end, dbuf_state->active_pipes);
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
+ yesno(dbuf_state->joined_mbus));
}
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
@@ -7141,6 +7363,19 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
/* Wa_14011059788:tgl,rkl,adl_s,dg1 */
intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
0, DFR_DISABLE);
+
+ /* Wa_14013723622:tgl,rkl,dg1,adl-s */
+ if (DISPLAY_VER(dev_priv) == 12)
+ intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
+ CLKREQ_POLICY_MEM_UP_OVRD, 0);
+}
+
+static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ gen12lp_init_clock_gating(dev_priv);
+
+ /* Wa_22011091694:adlp */
+ intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
}
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7620,11 +7855,13 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_DG1(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv))
+ dev_priv->display.init_clock_gating = adlp_init_clock_gating;
+ else if (IS_DG1(dev_priv))
dev_priv->display.init_clock_gating = dg1_init_clock_gating;
- else if (IS_GEN(dev_priv, 12))
+ else if (GRAPHICS_VER(dev_priv) == 12)
dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
- else if (IS_GEN(dev_priv, 11))
+ else if (GRAPHICS_VER(dev_priv) == 11)
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
@@ -7648,9 +7885,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = ivb_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
dev_priv->display.init_clock_gating = vlv_init_clock_gating;
- else if (IS_GEN(dev_priv, 6))
+ else if (GRAPHICS_VER(dev_priv) == 6)
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- else if (IS_GEN(dev_priv, 5))
+ else if (GRAPHICS_VER(dev_priv) == 5)
dev_priv->display.init_clock_gating = ilk_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
@@ -7658,11 +7895,11 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
else if (IS_I965G(dev_priv))
dev_priv->display.init_clock_gating = i965g_init_clock_gating;
- else if (IS_GEN(dev_priv, 3))
+ else if (GRAPHICS_VER(dev_priv) == 3)
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- else if (IS_GEN(dev_priv, 2))
+ else if (GRAPHICS_VER(dev_priv) == 2)
dev_priv->display.init_clock_gating = i830_init_clock_gating;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -7676,7 +7913,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
pnv_get_mem_freq(dev_priv);
- else if (IS_GEN(dev_priv, 5))
+ else if (GRAPHICS_VER(dev_priv) == 5)
ilk_get_mem_freq(dev_priv);
if (intel_has_sagv(dev_priv))
@@ -7689,9 +7926,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
- if ((IS_DISPLAY_VER(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
+ if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_DISPLAY_VER(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
+ (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -7734,12 +7971,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pnv_update_wm;
- } else if (IS_DISPLAY_VER(dev_priv, 4)) {
+ } else if (DISPLAY_VER(dev_priv) == 4) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_DISPLAY_VER(dev_priv, 3)) {
+ } else if (DISPLAY_VER(dev_priv) == 3) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_DISPLAY_VER(dev_priv, 2)) {
+ } else if (DISPLAY_VER(dev_priv) == 2) {
if (INTEL_NUM_PIPES(dev_priv) == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
@@ -7808,6 +8045,45 @@ int intel_dbuf_init(struct drm_i915_private *dev_priv)
return 0;
}
+/*
+ * Configure MBUS_CTL and the DBUF_CTL_S register of each slice to the
+ * join_mbus state before updating the requested state of all DBUF slices.
+ */
+static void update_mbus_pre_enable(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ u32 mbus_ctl, dbuf_min_tracker_val;
+ enum dbuf_slice slice;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ if (!IS_ALDERLAKE_P(dev_priv))
+ return;
+
+ /*
+ * TODO: Implement vblank synchronized MBUS joining changes.
+ * Must be properly coordinated with dbuf reprogramming.
+ */
+ if (dbuf_state->joined_mbus) {
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
+ } else {
+ mbus_ctl = MBUS_HASHING_MODE_2x2 |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
+ }
+
+ intel_de_rmw(dev_priv, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+
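+	/* Apply the matching minimum tracker state to each DBUF slice */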
+ for_each_dbuf_slice(dev_priv, slice)
+ intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ dbuf_min_tracker_val);
+}
+
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -7817,11 +8093,13 @@ void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_dbuf_state(state);
if (!new_dbuf_state ||
- new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
return;
WARN_ON(!new_dbuf_state->base.changed);
+ update_mbus_pre_enable(state);
gen9_dbuf_slices_update(dev_priv,
old_dbuf_state->enabled_slices |
new_dbuf_state->enabled_slices);
@@ -7836,7 +8114,8 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_dbuf_state(state);
if (!new_dbuf_state ||
- new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
return;
WARN_ON(!new_dbuf_state->base.changed);
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 669c8d505677..91f23b7f0af2 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -78,13 +78,11 @@ struct intel_dbuf_state {
struct skl_ddb_entry ddb[I915_MAX_PIPES];
unsigned int weight[I915_MAX_PIPES];
u8 slices[I915_MAX_PIPES];
-
u8 enabled_slices;
u8 active_pipes;
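+	/* Whether the MBUS is joined (MBUS_CTL & MBUS_JOIN, ADL-P) */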
+ bool joined_mbus;
};
-int intel_dbuf_init(struct drm_i915_private *dev_priv);
-
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
new file mode 100644
index 000000000000..82a6727ede46
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_range_manager.h>
+
+#include "i915_drv.h"
+#include "i915_scatterlist.h"
+
+#include "intel_region_ttm.h"
+
+/**
+ * DOC: TTM support structure
+ *
+ * The code in this file deals with setting up memory managers for TTM
+ * LMEM and MOCK regions and converting the output from
+ * the managers to struct sg_table. Basically, this provides the mapping
+ * from i915 GEM regions to TTM memory types and resource managers.
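+ *
+ * A rough lifecycle sketch, illustrative only, with error handling and
+ * all use of the returned objects elided:
+ *
+ *	intel_region_ttm_device_init(i915);
+ *	intel_region_ttm_init(mem);
+ *	res = intel_region_ttm_node_alloc(mem, size, 0);
+ *	st = intel_region_ttm_node_to_st(mem, res);
+ *	...
+ *	intel_region_ttm_node_free(mem, res);
+ *	intel_region_ttm_fini(mem);
+ *	intel_region_ttm_device_fini(i915);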
+ */
+
+/* A zero-initialized driver for now. We don't have a TTM backend yet. */
+static struct ttm_device_funcs i915_ttm_bo_driver;
+
+/**
+ * intel_region_ttm_device_init - Initialize a TTM device
+ * @dev_priv: Pointer to an i915 device private structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *drm = &dev_priv->drm;
+
+ return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
+ drm->dev, drm->anon_inode->i_mapping,
+ drm->vma_offset_manager, false, false);
+}
+
+/**
+ * intel_region_ttm_device_fini - Finalize a TTM device
+ * @dev_priv: Pointer to an i915 device private structure.
+ */
+void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
+{
+ ttm_device_fini(&dev_priv->bdev);
+}
+
+/*
+ * Map the i915 memory regions to TTM memory types. We use the
+ * driver-private types for now, reserving TTM_PL_VRAM for stolen
+ * memory and TTM_PL_TT for GGTT use, should we decide to implement those.
+ * An INTEL_MEMORY_LOCAL region with instance 0 thus maps to TTM_PL_PRIV,
+ * instance 1 to TTM_PL_PRIV + 1, and so on.
+ */
+static int intel_region_to_ttm_type(struct intel_memory_region *mem)
+{
+ int type;
+
+ GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
+ mem->type != INTEL_MEMORY_MOCK);
+
+ type = mem->instance + TTM_PL_PRIV;
+ GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
+
+ return type;
+}
+
+static struct ttm_resource *
+intel_region_ttm_node_reserve(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size)
+{
+ struct ttm_resource_manager *man = mem->region_private;
+ struct ttm_place place = {};
+ struct ttm_buffer_object mock_bo = {};
+ struct ttm_resource *res;
+ int ret;
+
+ /*
+ * Having to use a mock_bo is unfortunate but stems from some
+	 * drivers having private managers that insist on knowing what the
+	 * allocated memory is intended for, using it to send private
+	 * data to the manager. The bo has also recently been used to send
+	 * alignment info to the manager. Assume that, apart from the latter,
+	 * none of the managers we use will ever access the buffer object
+	 * members, and hope that we can pass the alignment info via
+	 * struct ttm_place in the future.
+ */
+
+ place.fpfn = offset >> PAGE_SHIFT;
+ place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
+ mock_bo.base.size = size;
+ ret = man->func->alloc(man, &mock_bo, &place, &res);
+ if (ret == -ENOSPC)
+ ret = -ENXIO;
+
+ return ret ? ERR_PTR(ret) : res;
+}
+
+/**
+ * intel_region_ttm_node_free - Free a node allocated from a resource manager
+ * @mem: The region the node was allocated from.
+ * @res: The opaque node representing an allocation.
+ */
+void intel_region_ttm_node_free(struct intel_memory_region *mem,
+ struct ttm_resource *res)
+{
+ struct ttm_resource_manager *man = mem->region_private;
+
+ man->func->free(man, res);
+}
+
+static const struct intel_memory_region_private_ops priv_ops = {
+ .reserve = intel_region_ttm_node_reserve,
+ .free = intel_region_ttm_node_free,
+};
+
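+/**
+ * intel_region_ttm_init - Initialize a memory region for TTM.
+ * @mem: The region to initialize.
+ *
+ * This function sets up a standalone TTM range manager for the region and
+ * hooks the region up to it through the private ops, so that allocations
+ * from the region are served by that manager.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */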
+int intel_region_ttm_init(struct intel_memory_region *mem)
+{
+ struct ttm_device *bdev = &mem->i915->bdev;
+ int mem_type = intel_region_to_ttm_type(mem);
+ int ret;
+
+ ret = ttm_range_man_init(bdev, mem_type, false,
+ resource_size(&mem->region) >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ mem->chunk_size = PAGE_SIZE;
+ mem->max_order =
+ get_order(rounddown_pow_of_two(resource_size(&mem->region)));
+ mem->is_range_manager = true;
+ mem->priv_ops = &priv_ops;
+ mem->region_private = ttm_manager_type(bdev, mem_type);
+
+ return 0;
+}
+
+/**
+ * intel_region_ttm_fini - Finalize a TTM region.
+ * @mem: The memory region
+ *
+ * This function takes down the TTM resource manager associated with the
+ * memory region, and if it was registered with the TTM device,
+ * removes that registration.
+ */
+void intel_region_ttm_fini(struct intel_memory_region *mem)
+{
+ int ret;
+
+ ret = ttm_range_man_fini(&mem->i915->bdev,
+ intel_region_to_ttm_type(mem));
+ GEM_WARN_ON(ret);
+ mem->region_private = NULL;
+}
+
+/**
+ * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
+ * to an sg_table.
+ * @mem: The memory region.
+ * @res: The resource manager node obtained from the TTM resource manager.
+ *
+ * The GEM backends typically use sg-tables for operations on the underlying
+ * io_memory. So provide a way for the backends to translate the
+ * nodes they are handed from TTM to sg-tables.
+ *
+ * Return: A malloced sg_table on success, an error pointer on failure.
+ */
+struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
+ struct ttm_resource *res)
+{
+ struct ttm_range_mgr_node *range_node =
+ container_of(res, typeof(*range_node), base);
+
+ GEM_WARN_ON(!mem->is_range_manager);
+ return i915_sg_from_mm_node(&range_node->mm_nodes[0],
+ mem->region.start);
+}
+
+/**
+ * intel_region_ttm_node_alloc - Allocate memory resources from a region
+ * @mem: The memory region.
+ * @size: The requested size in bytes
+ * @flags: Allocation flags
+ *
+ * This functionality is provided only for callers that need to allocate
+ * memory from standalone TTM range managers, without the TTM eviction
+ * functionality. Don't use this if you are not completely sure that's the
+ * case. The returned opaque node can be converted to an sg_table using
+ * intel_region_ttm_node_to_st(), and can be freed using
+ * intel_region_ttm_node_free().
+ *
+ * Return: A valid pointer on success, an error pointer on failure.
+ */
+struct ttm_resource *
+intel_region_ttm_node_alloc(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct ttm_resource_manager *man = mem->region_private;
+ struct ttm_place place = {};
+ struct ttm_buffer_object mock_bo = {};
+ struct ttm_resource *res;
+ int ret;
+
+ /*
+	 * We ignore the flags for now since we're using the range
+	 * manager, and contiguity and minimum page size would be fulfilled
+	 * by default if the size is aligned to the minimum page size.
+ */
+ mock_bo.base.size = size;
+
+ if (mem->is_range_manager) {
+ if (size >= SZ_1G)
+ mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
+ else if (size >= SZ_2M)
+ mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
+ else if (size >= SZ_64K)
+ mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
+ }
+
+ ret = man->func->alloc(man, &mock_bo, &place, &res);
+ if (ret == -ENOSPC)
+ ret = -ENXIO;
+ return ret ? ERR_PTR(ret) : res;
+}
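+
+/*
+ * Minimal usage sketch for the allocator above, illustrative only:
+ * "mem" is assumed to be a region initialized with intel_region_ttm_init(),
+ * and the resulting sg_table still needs to be consumed and torn down by
+ * the caller:
+ *
+ *	struct ttm_resource *res;
+ *	struct sg_table *st;
+ *
+ *	res = intel_region_ttm_node_alloc(mem, SZ_64K, 0);
+ *	if (IS_ERR(res))
+ *		return PTR_ERR(res);
+ *
+ *	st = intel_region_ttm_node_to_st(mem, res);
+ *	if (IS_ERR(st)) {
+ *		intel_region_ttm_node_free(mem, res);
+ *		return PTR_ERR(st);
+ *	}
+ */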
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
new file mode 100644
index 000000000000..11b0574ab791
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef _INTEL_REGION_TTM_H_
+#define _INTEL_REGION_TTM_H_
+
+#include <linux/types.h>
+
+#include "i915_selftest.h"
+
+struct drm_i915_private;
+struct intel_memory_region;
+struct ttm_resource;
+
+int intel_region_ttm_device_init(struct drm_i915_private *dev_priv);
+
+void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv);
+
+int intel_region_ttm_init(struct intel_memory_region *mem);
+
+void intel_region_ttm_fini(struct intel_memory_region *mem);
+
+struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
+ struct ttm_resource *res);
+
+struct ttm_resource *
+intel_region_ttm_node_alloc(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+void intel_region_ttm_node_free(struct intel_memory_region *mem,
+				struct ttm_resource *res);
+#endif /* _INTEL_REGION_TTM_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 1e4ddd11c12b..183ea2b187fe 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -49,7 +49,7 @@ enum i915_drm_suspend_mode {
*/
struct intel_runtime_pm {
atomic_t wakeref_count;
- struct device *kdev; /* points to i915->drm.pdev->dev */
+ struct device *kdev; /* points to i915->drm.dev */
bool available;
bool suspended;
bool irqs_enabled;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 0ec0cf191955..f0a82b37bd1a 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -430,7 +430,7 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
if (is_read && val1)
*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
- if (INTEL_GEN(i915) > 6)
+ if (GRAPHICS_VER(i915) > 6)
return gen7_check_mailbox_status(mbox);
else
return gen6_check_mailbox_status(mbox);
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 4d71547a5b83..ba9479a67521 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -47,6 +47,13 @@ static const struct intel_step_info adls_revid_step_tbl[] = {
[0xC] = { .gt_step = STEP_D0, .display_step = STEP_C0 },
};
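+/* ADL-P GT/display steppings, indexed by PCI revision id */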
+static const struct intel_step_info adlp_revid_step_tbl[] = {
+ [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
+ [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
+ [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 },
+ [0xC] = { .gt_step = STEP_C0, .display_step = STEP_D0 },
+};
+
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -54,7 +61,10 @@ void intel_step_init(struct drm_i915_private *i915)
int revid = INTEL_REVID(i915);
struct intel_step_info step = {};
- if (IS_ALDERLAKE_S(i915)) {
+ if (IS_ALDERLAKE_P(i915)) {
+ revids = adlp_revid_step_tbl;
+ size = ARRAY_SIZE(adlp_revid_step_tbl);
+ } else if (IS_ALDERLAKE_S(i915)) {
revids = adls_revid_step_tbl;
size = ARRAY_SIZE(adls_revid_step_tbl);
} else if (IS_TGL_U(i915) || IS_TGL_Y(i915)) {
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 661b50191f2b..1bed8f666048 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1635,7 +1635,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
#define fw_domain_init(uncore__, id__, set__, ack__) \
(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
- if (INTEL_GEN(i915) >= 11) {
+ if (GRAPHICS_VER(i915) >= 11) {
/* we'll prune the domains of missing engines later */
intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
int i;
@@ -1665,7 +1665,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
uncore->funcs.force_wake_put = fw_domains_put;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
@@ -1733,7 +1733,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN(i915, 6)) {
+ } else if (GRAPHICS_VER(i915) == 6) {
uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
uncore->funcs.force_wake_put = fw_domains_put;
@@ -1800,7 +1800,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
int mmio_bar;
int mmio_size;
- mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
+ mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -1810,7 +1810,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
* generations up to Ironlake.
* For dgfx chips register range is expanded to 4MB.
*/
- if (INTEL_GEN(i915) < 5)
+ if (GRAPHICS_VER(i915) < 5)
mmio_size = 512 * 1024;
else if (IS_DGFX(i915))
mmio_size = 4 * 1024 * 1024;
@@ -1849,7 +1849,7 @@ static void uncore_raw_init(struct intel_uncore *uncore)
if (intel_vgpu_active(uncore->i915)) {
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
- } else if (IS_GEN(uncore->i915, 5)) {
+ } else if (GRAPHICS_VER(uncore->i915) == 5) {
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
} else {
@@ -1870,7 +1870,7 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
return ret;
forcewake_early_sanitize(uncore, 0);
- if (IS_GEN_RANGE(i915, 6, 7)) {
+ if (IS_GRAPHICS_VER(i915, 6, 7)) {
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
if (IS_VALLEYVIEW(i915)) {
@@ -1879,7 +1879,7 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
} else {
ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN(i915, 8)) {
+ } else if (GRAPHICS_VER(i915) == 8) {
if (IS_CHERRYVIEW(i915)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
@@ -1888,11 +1888,11 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
- } else if (IS_GEN(i915, 11)) {
+ } else if (GRAPHICS_VER(i915) == 11) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
@@ -1917,6 +1917,18 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
if (ret)
return ret;
+ /*
+ * The boot firmware initializes local memory and assesses its health.
+ * If memory training fails, the punit will have been instructed to
+ * keep the GT powered down; we won't be able to communicate with it
+ * and we should not continue with driver initialization.
+ */
+ if (IS_DGFX(i915) &&
+ !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
+ drm_err(&i915->drm, "LMEM not initialized by firmware\n");
+ return -ENODEV;
+ }
+
if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
uncore->flags |= UNCORE_HAS_FORCEWAKE;
@@ -1940,7 +1952,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
- if (IS_GEN_RANGE(i915, 6, 7))
+ if (IS_GRAPHICS_VER(i915, 6, 7))
uncore->flags |= UNCORE_HAS_FIFO;
/* clear out unclaimed reg detection bit */
@@ -1967,7 +1979,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id;
int i;
- if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11)
+ if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
return;
for (i = 0; i < I915_MAX_VCS; i++) {
@@ -2008,12 +2020,14 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore)
static const struct reg_whitelist {
i915_reg_t offset_ldw;
i915_reg_t offset_udw;
- u16 gen_mask;
+ u8 min_graphics_ver;
+ u8 max_graphics_ver;
u8 size;
} reg_read_whitelist[] = { {
.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
- .gen_mask = INTEL_GEN_MASK(4, 12),
+ .min_graphics_ver = 4,
+ .max_graphics_ver = 12,
.size = 8
} };
@@ -2038,7 +2052,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
GEM_BUG_ON(entry->size > 8);
GEM_BUG_ON(entry_offset & (entry->size - 1));
- if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
+ if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) &&
entry_offset == (reg->offset & -entry->size))
break;
entry++;
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index ec776591e1cf..8309455f13ea 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -81,7 +81,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
if (!HAS_GT_UC(i915))
return;
- if (INTEL_GEN(i915) >= 11)
+ if (GRAPHICS_VER(i915) >= 11)
wopcm->size = GEN11_WOPCM_SIZE;
else
wopcm->size = GEN9_WOPCM_SIZE;
@@ -93,7 +93,7 @@ static u32 context_reserved_size(struct drm_i915_private *i915)
{
if (IS_GEN9_LP(i915))
return BXT_WOPCM_RC6_CTX_RESERVED;
- else if (INTEL_GEN(i915) >= 10)
+ else if (GRAPHICS_VER(i915) >= 10)
return CNL_WOPCM_HW_CTX_RESERVED;
else
return 0;
@@ -145,11 +145,11 @@ static bool check_hw_restrictions(struct drm_i915_private *i915,
u32 guc_wopcm_base, u32 guc_wopcm_size,
u32 huc_fw_size)
{
- if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base,
- guc_wopcm_size))
+ if (GRAPHICS_VER(i915) == 9 && !gen9_check_dword_gap(i915, guc_wopcm_base,
+ guc_wopcm_size))
return false;
- if (IS_GEN(i915, 9) &&
+ if (GRAPHICS_VER(i915) == 9 &&
!gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
return false;
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 4002c984c2e0..61bf4560d8af 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915)
return NULL;
kref_init(&active->ref);
- i915_active_init(&active->base, __live_active, __live_retire);
+ i915_active_init(&active->base, __live_active, __live_retire, 0);
return active;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
deleted file mode 100644
index f0f5c4df8dbc..000000000000
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ /dev/null
@@ -1,789 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/prime_numbers.h>
-
-#include "../i915_selftest.h"
-#include "i915_random.h"
-
-static void __igt_dump_block(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block,
- bool buddy)
-{
- pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
- block->header,
- i915_buddy_block_state(block),
- i915_buddy_block_order(block),
- i915_buddy_block_offset(block),
- i915_buddy_block_size(mm, block),
- yesno(!block->parent),
- yesno(buddy));
-}
-
-static void igt_dump_block(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- struct i915_buddy_block *buddy;
-
- __igt_dump_block(mm, block, false);
-
- buddy = get_buddy(block);
- if (buddy)
- __igt_dump_block(mm, buddy, true);
-}
-
-static int igt_check_block(struct i915_buddy_mm *mm,
- struct i915_buddy_block *block)
-{
- struct i915_buddy_block *buddy;
- unsigned int block_state;
- u64 block_size;
- u64 offset;
- int err = 0;
-
- block_state = i915_buddy_block_state(block);
-
- if (block_state != I915_BUDDY_ALLOCATED &&
- block_state != I915_BUDDY_FREE &&
- block_state != I915_BUDDY_SPLIT) {
- pr_err("block state mismatch\n");
- err = -EINVAL;
- }
-
- block_size = i915_buddy_block_size(mm, block);
- offset = i915_buddy_block_offset(block);
-
- if (block_size < mm->chunk_size) {
- pr_err("block size smaller than min size\n");
- err = -EINVAL;
- }
-
- if (!is_power_of_2(block_size)) {
- pr_err("block size not power of two\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(block_size, mm->chunk_size)) {
- pr_err("block size not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, mm->chunk_size)) {
- pr_err("block offset not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, block_size)) {
- pr_err("block offset not aligned to block size\n");
- err = -EINVAL;
- }
-
- buddy = get_buddy(block);
-
- if (!buddy && block->parent) {
- pr_err("buddy has gone fishing\n");
- err = -EINVAL;
- }
-
- if (buddy) {
- if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
- pr_err("buddy has wrong offset\n");
- err = -EINVAL;
- }
-
- if (i915_buddy_block_size(mm, buddy) != block_size) {
- pr_err("buddy size mismatch\n");
- err = -EINVAL;
- }
-
- if (i915_buddy_block_state(buddy) == block_state &&
- block_state == I915_BUDDY_FREE) {
- pr_err("block and its buddy are free\n");
- err = -EINVAL;
- }
- }
-
- return err;
-}
-
-static int igt_check_blocks(struct i915_buddy_mm *mm,
- struct list_head *blocks,
- u64 expected_size,
- bool is_contiguous)
-{
- struct i915_buddy_block *block;
- struct i915_buddy_block *prev;
- u64 total;
- int err = 0;
-
- block = NULL;
- prev = NULL;
- total = 0;
-
- list_for_each_entry(block, blocks, link) {
- err = igt_check_block(mm, block);
-
- if (!i915_buddy_block_is_allocated(block)) {
- pr_err("block not allocated\n"),
- err = -EINVAL;
- }
-
- if (is_contiguous && prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = i915_buddy_block_offset(prev);
- prev_block_size = i915_buddy_block_size(mm, prev);
- offset = i915_buddy_block_offset(block);
-
- if (offset != (prev_offset + prev_block_size)) {
- pr_err("block offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- if (err)
- break;
-
- total += i915_buddy_block_size(mm, block);
- prev = block;
- }
-
- if (!err) {
- if (total != expected_size) {
- pr_err("size mismatch, expected=%llx, found=%llx\n",
- expected_size, total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- pr_err("prev block, dump:\n");
- igt_dump_block(mm, prev);
- }
-
- if (block) {
- pr_err("bad block, dump:\n");
- igt_dump_block(mm, block);
- }
-
- return err;
-}
-
-static int igt_check_mm(struct i915_buddy_mm *mm)
-{
- struct i915_buddy_block *root;
- struct i915_buddy_block *prev;
- unsigned int i;
- u64 total;
- int err = 0;
-
- if (!mm->n_roots) {
- pr_err("n_roots is zero\n");
- return -EINVAL;
- }
-
- if (mm->n_roots != hweight64(mm->size)) {
- pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
- mm->n_roots, hweight64(mm->size));
- return -EINVAL;
- }
-
- root = NULL;
- prev = NULL;
- total = 0;
-
- for (i = 0; i < mm->n_roots; ++i) {
- struct i915_buddy_block *block;
- unsigned int order;
-
- root = mm->roots[i];
- if (!root) {
- pr_err("root(%u) is NULL\n", i);
- err = -EINVAL;
- break;
- }
-
- err = igt_check_block(mm, root);
-
- if (!i915_buddy_block_is_free(root)) {
- pr_err("root not free\n");
- err = -EINVAL;
- }
-
- order = i915_buddy_block_order(root);
-
- if (!i) {
- if (order != mm->max_order) {
- pr_err("max order root missing\n");
- err = -EINVAL;
- }
- }
-
- if (prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = i915_buddy_block_offset(prev);
- prev_block_size = i915_buddy_block_size(mm, prev);
- offset = i915_buddy_block_offset(root);
-
- if (offset != (prev_offset + prev_block_size)) {
- pr_err("root offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- block = list_first_entry_or_null(&mm->free_list[order],
- struct i915_buddy_block,
- link);
- if (block != root) {
- pr_err("root mismatch at order=%u\n", order);
- err = -EINVAL;
- }
-
- if (err)
- break;
-
- prev = root;
- total += i915_buddy_block_size(mm, root);
- }
-
- if (!err) {
- if (total != mm->size) {
- pr_err("expected mm size=%llx, found=%llx\n", mm->size,
- total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- pr_err("prev root(%u), dump:\n", i - 1);
- igt_dump_block(mm, prev);
- }
-
- if (root) {
- pr_err("bad root(%u), dump:\n", i);
- igt_dump_block(mm, root);
- }
-
- return err;
-}
-
-static void igt_mm_config(u64 *size, u64 *chunk_size)
-{
- I915_RND_STATE(prng);
- u32 s, ms;
-
- /* Nothing fancy, just try to get an interesting bit pattern */
-
- prandom_seed_state(&prng, i915_selftest.random_seed);
-
- /* Let size be a random number of pages up to 8 GB (2M pages) */
- s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
- /* Let the chunk size be a random power of 2 less than size */
- ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
- /* Round size down to the chunk size */
- s &= -ms;
-
- /* Convert from pages to bytes */
- *chunk_size = (u64)ms << 12;
- *size = (u64)s << 12;
-}
-
-static int igt_buddy_alloc_smoke(void *arg)
-{
- struct i915_buddy_mm mm;
- IGT_TIMEOUT(end_time);
- I915_RND_STATE(prng);
- u64 chunk_size;
- u64 mm_size;
- int *order;
- int err, i;
-
- igt_mm_config(&mm_size, &chunk_size);
-
- pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
-
- err = i915_buddy_init(&mm, mm_size, chunk_size);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- order = i915_random_order(mm.max_order + 1, &prng);
- if (!order)
- goto out_fini;
-
- for (i = 0; i <= mm.max_order; ++i) {
- struct i915_buddy_block *block;
- int max_order = order[i];
- bool timeout = false;
- LIST_HEAD(blocks);
- int order;
- u64 total;
-
- err = igt_check_mm(&mm);
- if (err) {
- pr_err("pre-mm check failed, abort\n");
- break;
- }
-
- pr_info("filling from max_order=%u\n", max_order);
-
- order = max_order;
- total = 0;
-
- do {
-retry:
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- err = PTR_ERR(block);
- if (err == -ENOMEM) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- } else {
- if (order--) {
- err = 0;
- goto retry;
- }
-
- pr_err("buddy_alloc with order=%d failed(%d)\n",
- order, err);
- }
-
- break;
- }
-
- list_add_tail(&block->link, &blocks);
-
- if (i915_buddy_block_order(block) != order) {
- pr_err("buddy_alloc order mismatch\n");
- err = -EINVAL;
- break;
- }
-
- total += i915_buddy_block_size(&mm, block);
-
- if (__igt_timeout(end_time, NULL)) {
- timeout = true;
- break;
- }
- } while (total < mm.size);
-
- if (!err)
- err = igt_check_blocks(&mm, &blocks, total, false);
-
- i915_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- err = igt_check_mm(&mm);
- if (err)
- pr_err("post-mm check failed\n");
- }
-
- if (err || timeout)
- break;
-
- cond_resched();
- }
-
- if (err == -ENOMEM)
- err = 0;
-
- kfree(order);
-out_fini:
- i915_buddy_fini(&mm);
-
- return err;
-}
-
-static int igt_buddy_alloc_pessimistic(void *arg)
-{
- const unsigned int max_order = 16;
- struct i915_buddy_block *block, *bn;
- struct i915_buddy_mm mm;
- unsigned int order;
- LIST_HEAD(blocks);
- int err;
-
- /*
- * Create a pot-sized mm, then allocate one of each possible
- * order within. This should leave the mm with exactly one
- * page left.
- */
-
- err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- GEM_BUG_ON(mm.max_order != max_order);
-
- for (order = 0; order < max_order; order++) {
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- err = PTR_ERR(block);
- goto err;
- }
-
- list_add_tail(&block->link, &blocks);
- }
-
- /* And now the last remaining block available */
- block = i915_buddy_alloc(&mm, 0);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
- err = PTR_ERR(block);
- goto err;
- }
- list_add_tail(&block->link, &blocks);
-
- /* Should be completely full! */
- for (order = max_order; order--; ) {
- block = i915_buddy_alloc(&mm, order);
- if (!IS_ERR(block)) {
- pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
- order);
- list_add_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- block = list_last_entry(&blocks, typeof(*block), link);
- list_del(&block->link);
- i915_buddy_free(&mm, block);
-
- /* As we free in increasing size, we make available larger blocks */
- order = 1;
- list_for_each_entry_safe(block, bn, &blocks, link) {
- list_del(&block->link);
- i915_buddy_free(&mm, block);
-
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
- order);
- err = PTR_ERR(block);
- goto err;
- }
- i915_buddy_free(&mm, block);
- order++;
- }
-
- /* To confirm, now the whole mm should be available */
- block = i915_buddy_alloc(&mm, max_order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
- max_order);
- err = PTR_ERR(block);
- goto err;
- }
- i915_buddy_free(&mm, block);
-
-err:
- i915_buddy_free_list(&mm, &blocks);
- i915_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_optimistic(void *arg)
-{
- const int max_order = 16;
- struct i915_buddy_block *block;
- struct i915_buddy_mm mm;
- LIST_HEAD(blocks);
- int order;
- int err;
-
- /*
- * Create a mm with one block of each order available, and
- * try to allocate them all.
- */
-
- err = i915_buddy_init(&mm,
- PAGE_SIZE * ((1 << (max_order + 1)) - 1),
- PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- GEM_BUG_ON(mm.max_order != max_order);
-
- for (order = 0; order <= max_order; order++) {
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- err = PTR_ERR(block);
- goto err;
- }
-
- list_add_tail(&block->link, &blocks);
- }
-
- /* Should be completely full! */
- block = i915_buddy_alloc(&mm, 0);
- if (!IS_ERR(block)) {
- pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
- list_add_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
-
-err:
- i915_buddy_free_list(&mm, &blocks);
- i915_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_pathological(void *arg)
-{
- const int max_order = 16;
- struct i915_buddy_block *block;
- struct i915_buddy_mm mm;
- LIST_HEAD(blocks);
- LIST_HEAD(holes);
- int order, top;
- int err;
-
- /*
- * Create a pot-sized mm, then allocate one of each possible
- * order within. This should leave the mm with exactly one
- * page left. Free the largest block, then whittle down again.
- * Eventually we will have a fully 50% fragmented mm.
- */
-
- err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- GEM_BUG_ON(mm.max_order != max_order);
-
- for (top = max_order; top; top--) {
- /* Make room by freeing the largest allocated block */
- block = list_first_entry_or_null(&blocks, typeof(*block), link);
- if (block) {
- list_del(&block->link);
- i915_buddy_free(&mm, block);
- }
-
- for (order = top; order--; ) {
- block = i915_buddy_alloc(&mm, order);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
- order, top);
- err = PTR_ERR(block);
- goto err;
- }
- list_add_tail(&block->link, &blocks);
- }
-
- /* There should be one final page for this sub-allocation */
- block = i915_buddy_alloc(&mm, 0);
- if (IS_ERR(block)) {
- pr_info("buddy_alloc hit -ENOMEM for hole\n");
- err = PTR_ERR(block);
- goto err;
- }
- list_add_tail(&block->link, &holes);
-
- block = i915_buddy_alloc(&mm, top);
- if (!IS_ERR(block)) {
- pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
- top, max_order);
- list_add_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- i915_buddy_free_list(&mm, &holes);
-
- /* Nothing larger than blocks of chunk_size now available */
- for (order = 1; order <= max_order; order++) {
- block = i915_buddy_alloc(&mm, order);
- if (!IS_ERR(block)) {
- pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
- order);
- list_add_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
-err:
- list_splice_tail(&holes, &blocks);
- i915_buddy_free_list(&mm, &blocks);
- i915_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_range(void *arg)
-{
- struct i915_buddy_mm mm;
- unsigned long page_num;
- LIST_HEAD(blocks);
- u64 chunk_size;
- u64 offset;
- u64 size;
- u64 rem;
- int err;
-
- igt_mm_config(&size, &chunk_size);
-
- pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
-
- err = i915_buddy_init(&mm, size, chunk_size);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- err = igt_check_mm(&mm);
- if (err) {
- pr_err("pre-mm check failed, abort, abort, abort!\n");
- goto err_fini;
- }
-
- rem = mm.size;
- offset = 0;
-
- for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
- struct i915_buddy_block *block;
- LIST_HEAD(tmp);
-
- size = min(page_num * mm.chunk_size, rem);
-
- err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
- if (err) {
- if (err == -ENOMEM) {
- pr_info("alloc_range hit -ENOMEM with size=%llx\n",
- size);
- } else {
- pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
- offset, size, err);
- }
-
- break;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct i915_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_range has no blocks\n");
- err = -EINVAL;
- break;
- }
-
- if (i915_buddy_block_offset(block) != offset) {
- pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
- i915_buddy_block_offset(block), offset);
- err = -EINVAL;
- }
-
- if (!err)
- err = igt_check_blocks(&mm, &tmp, size, true);
-
- list_splice_tail(&tmp, &blocks);
-
- if (err)
- break;
-
- offset += size;
-
- rem -= size;
- if (!rem)
- break;
-
- cond_resched();
- }
-
- if (err == -ENOMEM)
- err = 0;
-
- i915_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- err = igt_check_mm(&mm);
- if (err)
- pr_err("post-mm check failed\n");
- }
-
-err_fini:
- i915_buddy_fini(&mm);
-
- return err;
-}
-
-static int igt_buddy_alloc_limit(void *arg)
-{
- struct i915_buddy_block *block;
- struct i915_buddy_mm mm;
- const u64 size = U64_MAX;
- int err;
-
- err = i915_buddy_init(&mm, size, PAGE_SIZE);
- if (err)
- return err;
-
- if (mm.max_order != I915_BUDDY_MAX_ORDER) {
- pr_err("mm.max_order(%d) != %d\n",
- mm.max_order, I915_BUDDY_MAX_ORDER);
- err = -EINVAL;
- goto out_fini;
- }
-
- block = i915_buddy_alloc(&mm, mm.max_order);
- if (IS_ERR(block)) {
- err = PTR_ERR(block);
- goto out_fini;
- }
-
- if (i915_buddy_block_order(block) != mm.max_order) {
- pr_err("block order(%d) != %d\n",
- i915_buddy_block_order(block), mm.max_order);
- err = -EINVAL;
- goto out_free;
- }
-
- if (i915_buddy_block_size(&mm, block) !=
- BIT_ULL(mm.max_order) * PAGE_SIZE) {
- pr_err("block size(%llu) != %llu\n",
- i915_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE);
- err = -EINVAL;
- goto out_free;
- }
-
-out_free:
- i915_buddy_free(&mm, block);
-out_fini:
- i915_buddy_fini(&mm);
- return err;
-}
-
-int i915_buddy_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_buddy_alloc_pessimistic),
- SUBTEST(igt_buddy_alloc_optimistic),
- SUBTEST(igt_buddy_alloc_pathological),
- SUBTEST(igt_buddy_alloc_smoke),
- SUBTEST(igt_buddy_alloc_range),
- SUBTEST(igt_buddy_alloc_limit),
- };
-
- return i915_subtests(tests, NULL);
-}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index dc394fb7ccfa..152d9ab135b1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -87,14 +87,14 @@ static void simulate_hibernate(struct drm_i915_private *i915)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-static int pm_prepare(struct drm_i915_private *i915)
+static int igt_pm_prepare(struct drm_i915_private *i915)
{
i915_gem_suspend(i915);
return 0;
}
-static void pm_suspend(struct drm_i915_private *i915)
+static void igt_pm_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -104,7 +104,7 @@ static void pm_suspend(struct drm_i915_private *i915)
}
}
-static void pm_hibernate(struct drm_i915_private *i915)
+static void igt_pm_hibernate(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -116,7 +116,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
}
}
-static void pm_resume(struct drm_i915_private *i915)
+static void igt_pm_resume(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -148,16 +148,16 @@ static int igt_gem_suspend(void *arg)
if (err)
goto out;
- err = pm_prepare(i915);
+ err = igt_pm_prepare(i915);
if (err)
goto out;
- pm_suspend(i915);
+ igt_pm_suspend(i915);
/* Here be dragons! Note that with S3RST any S3 may become S4! */
simulate_hibernate(i915);
- pm_resume(i915);
+ igt_pm_resume(i915);
err = switch_to_context(ctx);
out:
@@ -183,16 +183,16 @@ static int igt_gem_hibernate(void *arg)
if (err)
goto out;
- err = pm_prepare(i915);
+ err = igt_pm_prepare(i915);
if (err)
goto out;
- pm_hibernate(i915);
+ igt_pm_hibernate(i915);
/* Here be dragons! */
simulate_hibernate(i915);
- pm_resume(i915);
+ igt_pm_resume(i915);
err = switch_to_context(ctx);
out:
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 45c6c0107c7c..f843a5040706 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -186,7 +186,7 @@ retry:
if (err)
goto err_ppgtt_cleanup;
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
if (err) {
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
@@ -208,7 +208,7 @@ retry:
if (err)
goto err_ppgtt_cleanup;
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
if (err) {
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
@@ -325,11 +325,10 @@ retry:
BIT_ULL(size)))
goto alloc_vm_end;
- err = i915_vm_pin_pt_stash(vm, &stash);
+ err = i915_vm_map_pt_stash(vm, &stash);
if (!err)
vm->allocate_va_range(vm, &stash,
addr, BIT_ULL(size));
-
i915_vm_free_pt_stash(vm, &stash);
alloc_vm_end:
if (err == -EDEADLK) {
@@ -1885,9 +1884,9 @@ static int igt_cs_tlb(void *arg)
u32 *cs = batch + i * 64 / sizeof(*cs);
u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
- GEM_BUG_ON(INTEL_GEN(i915) < 6);
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
cs[0] = MI_STORE_DWORD_IMM_GEN4;
- if (INTEL_GEN(i915) >= 8) {
+ if (GRAPHICS_VER(i915) >= 8) {
cs[1] = lower_32_bits(addr);
cs[2] = upper_32_bits(addr);
cs[3] = i;
@@ -1968,10 +1967,9 @@ retry:
if (err)
goto end_ww;
- err = i915_vm_pin_pt_stash(vm, &stash);
+ err = i915_vm_map_pt_stash(vm, &stash);
if (!err)
vm->allocate_va_range(vm, &stash, offset, chunk_size);
-
i915_vm_free_pt_stash(vm, &stash);
end_ww:
if (err == -EDEADLK) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 3db34d3eea58..34e5caf38093 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -33,5 +33,4 @@ selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
-selftest(buddy, i915_buddy_mock_selftests)
selftest(memory_region, intel_memory_region_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index e9d86dab8677..9e9a6cb1d9e5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -98,7 +98,7 @@ test_stream(struct i915_perf *perf)
I915_ENGINE_CLASS_RENDER,
0),
.sample_flags = SAMPLE_OA_REPORT,
- .oa_format = IS_GEN(perf->i915, 12) ?
+ .oa_format = GRAPHICS_VER(perf->i915) == 12 ?
I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
};
struct i915_perf_stream *stream;
@@ -162,7 +162,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
len = 5;
- if (INTEL_GEN(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
len++;
*cs++ = GFX_OP_PIPE_CONTROL(len);
@@ -307,7 +307,7 @@ static int live_noa_gpr(void *arg)
}
/* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
- scratch = kmap(__px_page(ce->vm->scratch[0]));
+ scratch = __px_vaddr(ce->vm->scratch[0]);
memset(scratch, POISON_FREE, PAGE_SIZE);
rq = intel_context_create_request(ce);
@@ -363,7 +363,7 @@ static int live_noa_gpr(void *arg)
}
cmd = MI_STORE_REGISTER_MEM;
- if (INTEL_GEN(i915) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
cmd++;
cmd |= MI_USE_GGTT;
@@ -405,7 +405,6 @@ static int live_noa_gpr(void *arg)
out_rq:
i915_request_put(rq);
out_ce:
- kunmap(__px_page(ce->vm->scratch[0]));
intel_context_put(ce);
out:
stream_destroy(stream);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index ee8e753d98ce..bd5c96a77ba3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -963,7 +963,7 @@ out_batch:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(i915);
+ const int ver = GRAPHICS_VER(i915);
struct i915_vma *vma;
u32 *cmd;
int err;
@@ -988,11 +988,11 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
goto err;
}
- if (gen >= 8) {
+ if (ver >= 8) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*cmd++ = lower_32_bits(vma->node.start);
*cmd++ = upper_32_bits(vma->node.start);
- } else if (gen >= 6) {
+ } else if (ver >= 6) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
*cmd++ = lower_32_bits(vma->node.start);
} else {
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
for (n = 0; n < smoke[0].ncontexts; n++) {
smoke[0].contexts[n] = live_context(i915, file);
- if (!smoke[0].contexts[n]) {
- ret = -ENOMEM;
+ if (IS_ERR(smoke[0].contexts[n])) {
+ ret = PTR_ERR(smoke[0].contexts[n]);
goto out_contexts;
}
}
@@ -2482,7 +2482,7 @@ static int perf_request_latency(void *arg)
struct pm_qos_request qos;
int err = 0;
- if (INTEL_GEN(i915) < 8) /* per-engine CS timestamp, semaphores */
+ if (GRAPHICS_VER(i915) < 8) /* per-engine CS timestamp, semaphores */
return 0;
cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 5fe7b80ca0bd..dd0607254a95 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -967,6 +967,9 @@ static int igt_vma_remapped_gtt(void *arg)
intel_wakeref_t wakeref;
int err = 0;
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index cfbbe415b57c..24d87d0fc747 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -94,9 +94,9 @@ int igt_spinner_pin(struct igt_spinner *spin,
}
if (!spin->batch) {
- unsigned int mode =
- i915_coherent_map_type(spin->gt->i915);
+ unsigned int mode;
+ mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -174,15 +174,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- if (INTEL_GEN(rq->engine->i915) >= 8) {
+ if (GRAPHICS_VER(rq->engine->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
- } else if (INTEL_GEN(rq->engine->i915) >= 6) {
+ } else if (GRAPHICS_VER(rq->engine->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
- } else if (INTEL_GEN(rq->engine->i915) >= 4) {
+ } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
@@ -194,11 +194,11 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = arbitration_command;
- if (INTEL_GEN(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
else if (IS_HASWELL(rq->engine->i915))
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
- else if (INTEL_GEN(rq->engine->i915) >= 6)
+ else if (GRAPHICS_VER(rq->engine->i915) >= 6)
*batch++ = MI_BATCH_BUFFER_START;
else
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
@@ -216,7 +216,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
}
flags = 0;
- if (INTEL_GEN(rq->engine->i915) <= 5)
+ if (GRAPHICS_VER(rq->engine->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index a5fc0bf3feb9..c85d516b85cd 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -57,9 +57,10 @@ static int igt_mock_fill(void *arg)
LIST_HEAD(objects);
int err = 0;
- page_size = mem->mm.chunk_size;
- max_pages = div64_u64(total, page_size);
+ page_size = mem->chunk_size;
rem = total;
+retry:
+ max_pages = div64_u64(rem, page_size);
for_each_prime_number_from(page_num, 1, max_pages) {
resource_size_t size = page_num * page_size;
@@ -85,6 +86,11 @@ static int igt_mock_fill(void *arg)
err = 0;
if (err == -ENXIO) {
if (page_num * page_size <= rem) {
+ if (mem->is_range_manager && max_pages > 1) {
+ max_pages >>= 1;
+ goto retry;
+ }
+
pr_err("%s failed, space still left in region\n",
__func__);
err = -EINVAL;
@@ -199,12 +205,18 @@ static int igt_mock_reserve(void *arg)
do {
u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
+retry:
size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
obj = igt_object_create(mem, &objects, size, 0);
if (IS_ERR(obj)) {
- if (PTR_ERR(obj) == -ENXIO)
+ if (PTR_ERR(obj) == -ENXIO) {
+ if (mem->is_range_manager &&
+ size > mem->chunk_size) {
+ size >>= 1;
+ goto retry;
+ }
break;
-
+ }
err = PTR_ERR(obj);
goto out_close;
}
@@ -220,7 +232,7 @@ static int igt_mock_reserve(void *arg)
out_close:
kfree(order);
close_objects(mem, &objects);
- i915_buddy_free_list(&mem->mm, &mem->reserved);
+ intel_memory_region_unreserve(mem);
return err;
}
@@ -240,7 +252,7 @@ static int igt_mock_contiguous(void *arg)
total = resource_size(&mem->region);
/* Min size */
- obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
+ obj = igt_object_create(mem, &objects, mem->chunk_size,
I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -321,14 +333,16 @@ static int igt_mock_contiguous(void *arg)
min = target;
target = total >> 1;
- /* Make sure we can still allocate all the fragmented space */
- obj = igt_object_create(mem, &objects, target, 0);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_close_objects;
- }
+ if (!mem->is_range_manager) {
+ /* Make sure we can still allocate all the fragmented space */
+ obj = igt_object_create(mem, &objects, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
- igt_object_release(obj);
+ igt_object_release(obj);
+ }
/*
* Even though we have enough free space, we don't have a big enough
@@ -348,7 +362,7 @@ static int igt_mock_contiguous(void *arg)
}
target >>= 1;
- } while (target >= mem->mm.chunk_size);
+ } while (target >= mem->chunk_size);
err_close_objects:
list_splice_tail(&holes, &objects);
@@ -368,7 +382,7 @@ static int igt_mock_splintered_region(void *arg)
/*
* Sanity check we can still allocate everything even if the
- * mm.max_order != mm.size. i.e our starting address space size is not a
+	 * max_order != mm.size, i.e. our starting address space size is not a
* power-of-two.
*/
@@ -377,17 +391,10 @@ static int igt_mock_splintered_region(void *arg)
if (IS_ERR(mem))
return PTR_ERR(mem);
- if (mem->mm.size != size) {
- pr_err("%s size mismatch(%llu != %llu)\n",
- __func__, mem->mm.size, size);
- err = -EINVAL;
- goto out_put;
- }
-
expected_order = get_order(rounddown_pow_of_two(size));
- if (mem->mm.max_order != expected_order) {
+ if (mem->max_order != expected_order) {
pr_err("%s order mismatch(%u != %u)\n",
- __func__, mem->mm.max_order, expected_order);
+ __func__, mem->max_order, expected_order);
err = -EINVAL;
goto out_put;
}
@@ -408,12 +415,15 @@ static int igt_mock_splintered_region(void *arg)
* sure that does indeed hold true.
*/
- obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
- if (!IS_ERR(obj)) {
- pr_err("%s too large contiguous allocation was not rejected\n",
- __func__);
- err = -EINVAL;
- goto out_close;
+ if (!mem->is_range_manager) {
+ obj = igt_object_create(mem, &objects, size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (!IS_ERR(obj)) {
+ pr_err("%s too large contiguous allocation was not rejected\n",
+ __func__);
+ err = -EINVAL;
+ goto out_close;
+ }
}
obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
@@ -432,68 +442,6 @@ out_put:
return err;
}
-#ifndef SZ_8G
-#define SZ_8G BIT_ULL(33)
-#endif
-
-static int igt_mock_max_segment(void *arg)
-{
- const unsigned int max_segment = i915_sg_segment_size();
- struct intel_memory_region *mem = arg;
- struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
- struct i915_buddy_block *block;
- struct scatterlist *sg;
- LIST_HEAD(objects);
- u64 size;
- int err = 0;
-
- /*
- * While we may create very large contiguous blocks, we may need
- * to break those down for consumption elsewhere. In particular,
- * dma-mapping with scatterlist elements have an implicit limit of
- * UINT_MAX on each element.
- */
-
- size = SZ_8G;
- mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
- if (IS_ERR(mem))
- return PTR_ERR(mem);
-
- obj = igt_object_create(mem, &objects, size, 0);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_put;
- }
-
- size = 0;
- list_for_each_entry(block, &obj->mm.blocks, link) {
- if (i915_buddy_block_size(&mem->mm, block) > size)
- size = i915_buddy_block_size(&mem->mm, block);
- }
- if (size < max_segment) {
- pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
- __func__, max_segment, size);
- err = -EINVAL;
- goto out_close;
- }
-
- for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
- if (sg->length > max_segment) {
- pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
- __func__, sg->length, max_segment);
- err = -EINVAL;
- goto out_close;
- }
- }
-
-out_close:
- close_objects(mem, &objects);
-out_put:
- intel_memory_region_put(mem);
- return err;
-}
-
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -513,7 +461,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (err)
return err;
- ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -593,7 +541,9 @@ static int igt_gpu_write(struct i915_gem_context *ctx,
if (err)
break;
+ i915_gem_object_lock(obj, NULL);
err = igt_cpu_check(obj, dword, rng);
+ i915_gem_object_unlock(obj);
if (err)
break;
} while (!__igt_timeout(end_time, NULL));
@@ -629,6 +579,88 @@ out_put:
return err;
}
+static int igt_lmem_create_cleared_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 size, i;
+ int err;
+
+ i915_gem_drain_freed_objects(i915);
+
+ size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
+ size = round_up(size, PAGE_SIZE);
+ i = 0;
+
+ do {
+ struct drm_i915_gem_object *obj;
+ unsigned int flags;
+ u32 dword, val;
+ void *vaddr;
+
+ /*
+ * Alternate between cleared and uncleared allocations, dirtying the
+ * pages each time, to check that the pages are always cleared when
+ * requested. Since we are the only user, we should see at least some
+ * reuse of the underlying pages.
+ */
+
+ flags = I915_BO_ALLOC_CPU_CLEAR;
+ if (i & 1)
+ flags = 0;
+
+ obj = i915_gem_object_create_lmem(i915, size, flags);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
+ &prng);
+
+ if (flags & I915_BO_ALLOC_CPU_CLEAR) {
+ err = igt_cpu_check(obj, dword, 0);
+ if (err) {
+ pr_err("%s failed with size=%u, flags=%u\n",
+ __func__, size, flags);
+ goto out_unpin;
+ }
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_unpin;
+ }
+
+ val = prandom_u32_state(&prng);
+
+ memset32(vaddr, val, obj->base.size / sizeof(u32));
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj);
+out_put:
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ if (err)
+ break;
+ ++i;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s completed (%u) iterations\n", __func__, i);
+
+ return err;
+}
+
static int igt_lmem_write_gpu(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -1014,7 +1046,6 @@ int intel_memory_region_mock_selftests(void)
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
- SUBTEST(igt_mock_max_segment),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
@@ -1043,6 +1074,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_create_cleared_cpu),
SUBTEST(igt_lmem_write_cpu),
SUBTEST(igt_lmem_write_gpu),
};
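A userspace toy of what igt_lmem_create_cleared_cpu() above is probing: a deliberately recycled buffer stands in for lmem pages, even iterations request a clear, odd ones do not, and every pass dirties the buffer so a broken clear-on-alloc path would trip the assert. Names and sizes are illustrative only.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define ALLOC_CLEAR 1
static unsigned int pool[1024];		/* stands in for recycled lmem pages */

static unsigned int *toy_alloc(int flags)
{
	if (flags & ALLOC_CLEAR)
		memset(pool, 0, sizeof(pool));
	return pool;
}

int main(void)
{
	for (int i = 0; i < 8; i++) {
		int flags = (i & 1) ? 0 : ALLOC_CLEAR;
		unsigned int *p = toy_alloc(flags);

		if (flags & ALLOC_CLEAR)
			assert(p[rand() % 1024] == 0);	/* must observe the clear */
		memset(p, 0xaa, sizeof(pool));		/* dirty for the next pass */
	}
	return 0;
}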
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 0e4e6be0101d..8ef9e6a4ad05 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -125,17 +125,19 @@ static int live_forcewake_ops(void *arg)
{
static const struct reg {
const char *name;
+ u8 min_graphics_ver;
+ u8 max_graphics_ver;
unsigned long platforms;
unsigned int offset;
} registers[] = {
{
"RING_START",
- INTEL_GEN_MASK(6, 7),
+ 6, 7,
0x38,
},
{
"RING_MI_MODE",
- INTEL_GEN_MASK(8, BITS_PER_LONG),
+ 8, U8_MAX,
0x9c,
}
};
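The gen bitmask gives way to an inclusive [min, max] graphics-version range here; in the driver IS_GRAPHICS_VER() reduces to the comparison sketched below.

#include <stdbool.h>

/* inclusive range test, the shape IS_GRAPHICS_VER() boils down to */
static bool ver_in_range(unsigned int ver, unsigned int min, unsigned int max)
{
	return ver >= min && ver <= max;
}

With max set to U8_MAX the second table entry keeps matching all future platforms, just as INTEL_GEN_MASK(8, BITS_PER_LONG) did.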
@@ -170,7 +172,7 @@ static int live_forcewake_ops(void *arg)
/* We have to pick carefully to get the exact behaviour we need */
for (r = registers; r->name; r++)
- if (r->platforms & INTEL_INFO(gt->i915)->gen_mask)
+ if (IS_GRAPHICS_VER(gt->i915, r->min_graphics_ver, r->max_graphics_ver))
break;
if (!r->name) {
pr_debug("Forcewaked register not known for %s; skipping\n",
@@ -319,7 +321,7 @@ static int live_fw_table(void *arg)
/* Confirm the table we load is still valid */
return intel_fw_table_check(gt->uncore->fw_domains_table,
gt->uncore->fw_domains_table_entries,
- INTEL_GEN(gt->i915) >= 9);
+ GRAPHICS_VER(gt->i915) >= 9);
}
int intel_uncore_live_selftests(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
index 58710ac3f979..eb03b5b28bad 100644
--- a/drivers/gpu/drm/i915/selftests/librapl.c
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -5,8 +5,18 @@
#include <asm/msr.h>
+#include "i915_drv.h"
#include "librapl.h"
+bool librapl_supported(const struct drm_i915_private *i915)
+{
+ /* Discrete cards require hwmon integration */
+ if (IS_DGFX(i915))
+ return false;
+
+ return librapl_energy_uJ();
+}
+
u64 librapl_energy_uJ(void)
{
unsigned long long power;
diff --git a/drivers/gpu/drm/i915/selftests/librapl.h b/drivers/gpu/drm/i915/selftests/librapl.h
index 887f3e91dd05..e3b24fad0a7a 100644
--- a/drivers/gpu/drm/i915/selftests/librapl.h
+++ b/drivers/gpu/drm/i915/selftests/librapl.h
@@ -8,6 +8,10 @@
#include <linux/types.h>
+struct drm_i915_private;
+
+bool librapl_supported(const struct drm_i915_private *i915);
+
u64 librapl_energy_uJ(void);
#endif /* SELFTEST_LIBRAPL_H */
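A hedged usage sketch of the new helper pair: callers gate on librapl_supported(), which is false on discrete cards and whenever the RAPL MSR reads zero, before sampling energy. The subtest shape below is hypothetical, not code from the series.

/* hypothetical subtest shape, assumes i915 selftest headers */
static int live_energy_sanity(void *arg)
{
	struct drm_i915_private *i915 = arg;
	u64 before, after;

	if (!librapl_supported(i915))
		return 0;			/* not an error: just skip */

	before = librapl_energy_uJ();
	/* ... run some GPU work here ... */
	after = librapl_energy_uJ();

	return after >= before ? 0 : -EINVAL;
}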
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 0188f877cab2..d189c4bd4bef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -32,6 +32,7 @@
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
#include "mock_request.h"
#include "mock_gem_device.h"
@@ -70,6 +71,7 @@ static void mock_device_release(struct drm_device *dev)
mock_fini_ggtt(&i915->ggtt);
destroy_workqueue(i915->wq);
+ intel_region_ttm_device_fini(i915);
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
@@ -116,6 +118,7 @@ struct drm_i915_private *mock_gem_device(void)
#endif
struct drm_i915_private *i915;
struct pci_dev *pdev;
+ int ret;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev)
@@ -146,7 +149,6 @@ struct drm_i915_private *mock_gem_device(void)
}
pci_set_drvdata(pdev, i915);
- i915->drm.pdev = pdev;
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
@@ -162,7 +164,7 @@ struct drm_i915_private *mock_gem_device(void)
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
- mkwrite_device_info(i915)->gen = -1;
+ mkwrite_device_info(i915)->graphics_ver = -1;
mkwrite_device_info(i915)->page_sizes =
I915_GTT_PAGE_SIZE_4K |
@@ -179,6 +181,10 @@ struct drm_i915_private *mock_gem_device(void)
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
i915->gt.awake = -ENODEV;
+ ret = intel_region_ttm_device_init(i915);
+ if (ret)
+ goto err_ttm;
+
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
goto err_drv;
@@ -202,6 +208,7 @@ struct drm_i915_private *mock_gem_device(void)
intel_engines_driver_register(i915);
i915->do_release = true;
+ ida_init(&i915->selftest.mock_region_instances);
return i915;
@@ -210,6 +217,8 @@ err_context:
err_unlock:
destroy_workqueue(i915->wq);
err_drv:
+ intel_region_ttm_device_fini(i915);
+err_ttm:
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
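The mock_gem_device() error paths above extend the usual unwind ladder: the new err_ttm label sits below err_drv, so a workqueue failure tears down the TTM device while a TTM failure skips straight past that step. A compilable toy of the pattern, with all names invented:

#include <stdio.h>

static int init_ttm_stub(void) { return 0; }
static int init_wq_stub(void) { return -1; }	/* force the unwind */
static void fini_ttm_stub(void) { puts("fini ttm"); }
static void late_release_stub(void) { puts("late release"); }

static int mock_setup(void)
{
	int ret;

	ret = init_ttm_stub();		/* cf. intel_region_ttm_device_init() */
	if (ret)
		goto err_ttm;

	ret = init_wq_stub();		/* cf. alloc_ordered_workqueue() */
	if (ret)
		goto err_drv;

	return 0;

err_drv:
	fini_ttm_stub();		/* undo only what succeeded */
err_ttm:
	late_release_stub();		/* shared cleanup below the ladder */
	return ret;
}

int main(void)
{
	return mock_setup() ? 1 : 0;
}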
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 5d2d010a1e22..eafc5a04975c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -1,17 +1,56 @@
// SPDX-License-Identifier: MIT
/*
- * Copyright © 2019 Intel Corporation
+ * Copyright © 2019-2021 Intel Corporation
*/
+#include <linux/scatterlist.h>
+
+#include <drm/ttm/ttm_placement.h>
+
#include "gem/i915_gem_region.h"
#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
#include "mock_region.h"
+static void mock_region_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static int mock_region_get_pages(struct drm_i915_gem_object *obj)
+{
+ unsigned int flags;
+ struct sg_table *pages;
+
+ flags = I915_ALLOC_MIN_PAGE_SIZE;
+ if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+ flags |= I915_ALLOC_CONTIGUOUS;
+
+ obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
+ obj->base.size,
+ flags);
+ if (IS_ERR(obj->mm.st_mm_node))
+ return PTR_ERR(obj->mm.st_mm_node);
+
+ pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
+ if (IS_ERR(pages)) {
+ intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+ return PTR_ERR(pages);
+ }
+
+ __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
+
+ return 0;
+}
+
static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
.name = "mock-region",
- .get_pages = i915_gem_object_get_pages_buddy,
- .put_pages = i915_gem_object_put_pages_buddy,
+ .get_pages = mock_region_get_pages,
+ .put_pages = mock_region_put_pages,
.release = i915_gem_object_release_memory_region,
};
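The new get_pages/put_pages pair above follows an acquire-convert-publish shape: take a node from the region, turn it into an sg_table, and on a failed conversion hand the node straight back. A userspace toy of that ownership rule, with all types and helpers invented:

#include <errno.h>
#include <stdlib.h>

struct node { int dummy; };			/* stands in for the TTM node */
struct table { struct node *backing; };		/* stands in for the sg_table */

static struct node *node_alloc(void) { return calloc(1, sizeof(struct node)); }
static void node_free(struct node *n) { free(n); }

static struct table *node_to_table(struct node *n)
{
	struct table *t = calloc(1, sizeof(*t));

	if (t)
		t->backing = n;
	return t;
}

/* mirrors mock_region_get_pages(): a failed conversion returns the node */
static int get_pages(struct node **np, struct table **tp)
{
	struct node *n = node_alloc();
	struct table *t;

	if (!n)
		return -ENOMEM;

	t = node_to_table(n);
	if (!t) {
		node_free(n);		/* we still own it: give it back */
		return -ENOMEM;
	}

	*np = n;
	*tp = t;
	return 0;
}

int main(void)
{
	struct node *n;
	struct table *t;

	return get_pages(&n, &t) ? 1 : 0;
}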
@@ -23,7 +62,7 @@ static int mock_object_init(struct intel_memory_region *mem,
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- if (size > mem->mm.size)
+ if (size > resource_size(&mem->region))
return -E2BIG;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
@@ -38,9 +77,18 @@ static int mock_object_init(struct intel_memory_region *mem,
return 0;
}
+static void mock_region_fini(struct intel_memory_region *mem)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ int instance = mem->instance;
+
+ intel_region_ttm_fini(mem);
+ ida_free(&i915->selftest.mock_region_instances, instance);
+}
+
static const struct intel_memory_region_ops mock_region_ops = {
- .init = intel_memory_region_init_buddy,
- .release = intel_memory_region_release_buddy,
+ .init = intel_region_ttm_init,
+ .release = mock_region_fini,
.init_object = mock_object_init,
};
@@ -51,6 +99,14 @@ mock_region_create(struct drm_i915_private *i915,
resource_size_t min_page_size,
resource_size_t io_start)
{
+ int instance = ida_alloc_max(&i915->selftest.mock_region_instances,
+ TTM_NUM_MEM_TYPES - TTM_PL_PRIV - 1,
+ GFP_KERNEL);
+
+ if (instance < 0)
+ return ERR_PTR(instance);
+
return intel_memory_region_create(i915, start, size, min_page_size,
- io_start, &mock_region_ops);
+ io_start, INTEL_MEMORY_MOCK, instance,
+ &mock_region_ops);
}
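Finally, a hedged sketch of the IDA bookkeeping mock_region_create() gains above: TTM exposes a fixed number of manager slots beyond TTM_PL_PRIV, so each mock region claims one id within that inclusive bound and mock_region_fini() returns it. Kernel-style, assumes <linux/idr.h>; the helper names are invented.

#include <linux/idr.h>

static DEFINE_IDA(mock_instances);

/* ids 0 .. nr_slots - 1, matching ida_alloc_max()'s inclusive bound */
static int mock_instance_get(unsigned int nr_slots)
{
	return ida_alloc_max(&mock_instances, nr_slots - 1, GFP_KERNEL);
}

static void mock_instance_put(int instance)
{
	ida_free(&mock_instances, instance);
}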