Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 3
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 81
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 151
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.h | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 111
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 66
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy.c | 80
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_csr.c | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 481
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 359
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 377
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 73
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 559
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 43
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 185
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpio_phy.c | 38
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 77
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c | 256
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.h | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 106
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 160
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 300
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 53
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 98
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 166
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo_regs.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.h | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 28
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 347
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_internal.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 68
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 52
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 29
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 36
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 136
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 145
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 55
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_context.c | 41
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/mock_context.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_gt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 329
-rw-r--r-- drivers/gpu/drm/i915/gt/gen2_engine_cs.h | 38
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_engine_cs.c | 455
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_engine_cs.h | 39
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 22
-rw-r--r-- drivers/gpu/drm/i915/gt/gen7_renderclear.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_sseu.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 160
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 41
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_user.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 95
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 23
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.h | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_irq.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 13
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 132
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 19
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_renderstate.c | 29
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 29
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset_types.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 870
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.c | 591
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c | 306
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 112
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 53
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 101
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 132
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 85
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 917
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_mocs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rc6.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rps.c | 68
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_timeline.c | 38
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 31
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c | 29
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/edid.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gvt/interrupt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 55
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 288
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs_params.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 77
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 38
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 136
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 43
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 93
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 208
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 57
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.h | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 79
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_vma_types.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 660
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 48
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 522
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.h | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_region_lmem.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 39
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 32
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf.c | 135
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf_selftests.h | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 873
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 18
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 13
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.c | 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_region.c | 1
194 files changed, 8556 insertions, 5329 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0da6ea6e3f1..bda4c0e408f8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -78,6 +78,8 @@ gt-y += \
gt/debugfs_engines.o \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
+ gt/gen2_engine_cs.o \
+ gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
gt/gen7_renderclear.o \
gt/gen8_ppgtt.o \
@@ -110,6 +112,7 @@ gt-y += \
gt/intel_ring_submission.o \
gt/intel_rps.o \
gt/intel_sseu.o \
+ gt/intel_sseu_debugfs.o \
gt/intel_timeline.o \
gt/intel_workarounds.o \
gt/shmem_utils.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 4fec5bd64920..8c55f5bee9ab 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1469,8 +1469,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
- pipe_config->hw.adjusted_mode.private_flags |=
- I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
+ pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}
static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
@@ -1558,10 +1557,6 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
- /* We would not operate in periodic command mode */
- pipe_config->hw.adjusted_mode.private_flags &=
- ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
-
/*
* In case of TE GATE cmd mode, we
* receive TE from the slave if
@@ -1569,14 +1564,14 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
*/
if (is_cmd_mode(intel_dsi)) {
if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
- pipe_config->hw.adjusted_mode.private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE1 |
I915_MODE_FLAG_DSI_USE_TE0;
else if (intel_dsi->ports == BIT(PORT_B))
- pipe_config->hw.adjusted_mode.private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE1;
else
- pipe_config->hw.adjusted_mode.private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE0;
}
@@ -1954,6 +1949,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
return;
err:
+ drm_connector_cleanup(connector);
drm_encoder_cleanup(&encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index d043057d2fa0..630f49b7aa01 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -249,9 +249,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_post = false;
crtc_state->fifo_changed = false;
crtc_state->preload_luts = false;
+ crtc_state->inherited = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
+ crtc_state->dsb = NULL;
return &crtc_state->uapi;
}
@@ -292,6 +294,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb);
+
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
kfree(crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 839124647202..c53c85d38fa5 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -479,7 +479,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915_modparams.vbt_sdvo_panel_type;
+ index = dev_priv->params.vbt_sdvo_panel_type;
if (index == -2) {
drm_dbg_kms(&dev_priv->drm,
"Ignore SDVO panel mode from BIOS VBT tables.\n");
@@ -722,6 +722,9 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
*/
if (!(power->drrs & BIT(panel_type)))
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+
+ if (bdb->version >= 232)
+ dev_priv->vbt.edp.hobl = power->hobl & BIT(panel_type);
}
static void
@@ -829,9 +832,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
- if (i915_modparams.edp_vswing) {
+ if (dev_priv->params.edp_vswing) {
dev_priv->vbt.edp.low_vswing =
- i915_modparams.edp_vswing == 1;
+ dev_priv->params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
dev_priv->vbt.edp.low_vswing = vswing == 0;
@@ -1619,30 +1622,18 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
return 0;
}
-static enum port dvo_port_to_port(u8 dvo_port)
+static enum port __dvo_port_to_port(int n_ports, int n_dvo,
+ const int port_mapping[][3], u8 dvo_port)
{
- /*
- * Each DDI port can have more than one value on the "DVO Port" field,
- * so look for all the possible values for each port.
- */
- static const int dvo_ports[][3] = {
- [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
- [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
- [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
- [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
- [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
- [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
- [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1},
- };
enum port port;
int i;
- for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
- for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
- if (dvo_ports[port][i] == -1)
+ for (port = PORT_A; port < n_ports; port++) {
+ for (i = 0; i < n_dvo; i++) {
+ if (port_mapping[port][i] == -1)
break;
- if (dvo_port == dvo_ports[port][i])
+ if (dvo_port == port_mapping[port][i])
return port;
}
}
@@ -1650,6 +1641,48 @@ static enum port dvo_port_to_port(u8 dvo_port)
return PORT_NONE;
}
+static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
+ u8 dvo_port)
+{
+ /*
+ * Each DDI port can have more than one value on the "DVO Port" field,
+ * so look for all the possible values for each port.
+ */
+ static const int port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ [PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT },
+ [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 },
+ [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 },
+ };
+ /*
+ * Bspec lists the ports as A, B, C, D - however internally in our
+ * driver we keep them as PORT_A, PORT_B, PORT_D and PORT_E so the
+ * registers in Display Engine match the right offsets. Apply the
+ * mapping here to translate from VBT to internal convention.
+ */
+ static const int rkl_port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_C] = { -1 },
+ [PORT_D] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_E] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ };
+
+ if (IS_ROCKETLAKE(dev_priv))
+ return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
+ ARRAY_SIZE(rkl_port_mapping[0]),
+ rkl_port_mapping,
+ dvo_port);
+ else
+ return __dvo_port_to_port(ARRAY_SIZE(port_mapping),
+ ARRAY_SIZE(port_mapping[0]),
+ port_mapping,
+ dvo_port);
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv,
struct display_device_data *devdata,
u8 bdb_version)
@@ -1659,7 +1692,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
enum port port;
- port = dvo_port_to_port(child->dvo_port);
+ port = dvo_port_to_port(dev_priv, child->dvo_port);
if (port == PORT_NONE)
return;
@@ -2603,10 +2636,10 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
- aux_ch = AUX_CH_C;
+ aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_D : AUX_CH_C;
break;
case DP_AUX_D:
- aux_ch = AUX_CH_D;
+ aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_E : AUX_CH_D;
break;
case DP_AUX_E:
aux_ch = AUX_CH_E;
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index fef04e2d954e..bd060404d249 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,12 +5,12 @@
#include <drm/drm_atomic_state_helper.h>
+#include "intel_atomic.h"
#include "intel_bw.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
-#include "intel_sideband.h"
-#include "intel_atomic.h"
#include "intel_pm.h"
-
+#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -199,6 +199,12 @@ static const struct intel_sa_info tgl_sa_info = {
.displayrtids = 256,
};
+static const struct intel_sa_info rkl_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 20, /* GB/s */
+ .displayrtids = 128,
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -309,7 +315,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_GEN(dev_priv, 12))
+ if (IS_ROCKETLAKE(dev_priv))
+ icl_get_bw_info(dev_priv, &rkl_sa_info);
+ else if (IS_GEN(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
else if (IS_GEN(dev_priv, 11))
icl_get_bw_info(dev_priv, &icl_sa_info);
@@ -420,6 +428,141 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int max_bw = 0;
+ int slice_id;
+ enum pipe pipe;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ enum plane_id plane_id;
+ struct intel_dbuf_bw *crtc_bw;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];
+
+ memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *plane_alloc =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *uv_plane_alloc =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
+ unsigned int data_rate = crtc_state->data_rate[plane_id];
+ unsigned int dbuf_mask = 0;
+
+ dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
+ dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);
+
+ /*
+ * FIXME: To calculate that more properly we probably
+ * need to split per plane data_rate into data_rate_y
+ * and data_rate_uv for multiplanar formats in order not
+ * to get accounted those twice if they happen to reside
+ * on different slices.
+ * However for pre-icl this would work anyway because
+ * we have only single slice and for icl+ uv plane has
+ * non-zero data rate.
+ * So in the worst case those calculations are a bit
+ * pessimistic, which shouldn't pose any significant
+ * problem anyway.
+ */
+ for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
+ crtc_bw->used_bw[slice_id] += data_rate;
+ }
+ }
+
+ if (!old_bw_state)
+ return 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_dbuf_bw *crtc_bw;
+
+ crtc_bw = &new_bw_state->dbuf_bw[pipe];
+
+ for_each_dbuf_slice(slice_id) {
+ /*
+ * Current experimental observations show that contrary
+ * to BSpec we get underruns once we exceed 64 * CDCLK
+ * for slices in total.
+ * As a temporary measure in order not to keep CDCLK
+ * bumped up all the time we calculate CDCLK according
+ * to this formula for overall bw consumed by slices.
+ */
+ max_bw += crtc_bw->used_bw[slice_id];
+ }
+ }
+
+ new_bw_state->min_cdclk = max_bw / 64;
+
+ if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
+ int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int min_cdclk = 0;
+ enum pipe pipe;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ }
+
+ if (!old_bw_state)
+ return 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (!cdclk_state)
+ return 0;
+
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ }
+
+ new_bw_state->min_cdclk = min_cdclk;
+
+ if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
+ int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index bbcaaa73ec1b..46c6eecbd917 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -9,14 +9,20 @@
#include <drm/drm_atomic.h>
#include "intel_display.h"
+#include "intel_display_power.h"
#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc_state;
+struct intel_dbuf_bw {
+ int used_bw[I915_MAX_DBUF_SLICES];
+};
+
struct intel_bw_state {
struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
/*
* Contains a bit mask, used to determine, whether correspondent
@@ -36,6 +42,8 @@ struct intel_bw_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ int min_cdclk;
};
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
@@ -56,5 +64,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask);
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state);
+int skl_bw_calc_min_cdclk(struct intel_atomic_state *state);
#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 979a0241fdcb..bb91dace304a 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -21,7 +21,10 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/time.h>
+
#include "intel_atomic.h"
+#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
@@ -2077,8 +2080,15 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
* Explicitly stating here that this seems to be currently
* rather a Hack, than final solution.
*/
- if (IS_TIGERLAKE(dev_priv))
- min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+ if (IS_TIGERLAKE(dev_priv)) {
+ /*
+ * Clamp to max_cdclk_freq in case pixel rate is higher,
+ * in order not to break an 8K, but still leave W/A at place.
+ */
+ min_cdclk = max_t(int, min_cdclk,
+ min_t(int, crtc_state->pixel_rate,
+ dev_priv->max_cdclk_freq));
+ }
if (min_cdclk > dev_priv->max_cdclk_freq) {
drm_dbg_kms(&dev_priv->drm,
@@ -2094,6 +2104,7 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
{
struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_bw_state *bw_state = NULL;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
@@ -2106,6 +2117,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
if (min_cdclk < 0)
return min_cdclk;
+ bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(bw_state))
+ return PTR_ERR(bw_state);
+
if (cdclk_state->min_cdclk[i] == min_cdclk)
continue;
@@ -2117,9 +2132,15 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
}
min_cdclk = cdclk_state->force_min_cdclk;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(dev_priv, pipe) {
min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ if (!bw_state)
+ continue;
+
+ min_cdclk = max(bw_state->min_cdclk, min_cdclk);
+ }
+
return min_cdclk;
}
@@ -2700,29 +2721,59 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv)
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
-static int g4x_hrawclk(struct drm_i915_private *dev_priv)
+static int i9xx_hrawclk(struct drm_i915_private *dev_priv)
{
u32 clkcfg;
- /* hrawclock is 1/4 the FSB frequency */
- clkcfg = intel_de_read(dev_priv, CLKCFG);
- switch (clkcfg & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_400:
- return 100000;
- case CLKCFG_FSB_533:
- return 133333;
- case CLKCFG_FSB_667:
- return 166667;
- case CLKCFG_FSB_800:
- return 200000;
- case CLKCFG_FSB_1067:
- case CLKCFG_FSB_1067_ALT:
- return 266667;
- case CLKCFG_FSB_1333:
- case CLKCFG_FSB_1333_ALT:
- return 333333;
- default:
- return 133333;
+ /*
+ * hrawclock is 1/4 the FSB frequency
+ *
+ * Note that this only reads the state of the FSB
+ * straps, not the actual FSB frequency. Some BIOSen
+ * let you configure each independently. Ideally we'd
+ * read out the actual FSB frequency but sadly we
+ * don't know which registers have that information,
+ * and all the relevant docs have gone to bit heaven :(
+ */
+ clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK;
+
+ if (IS_MOBILE(dev_priv)) {
+ switch (clkcfg) {
+ case CLKCFG_FSB_400:
+ return 100000;
+ case CLKCFG_FSB_533:
+ return 133333;
+ case CLKCFG_FSB_667:
+ return 166667;
+ case CLKCFG_FSB_800:
+ return 200000;
+ case CLKCFG_FSB_1067:
+ return 266667;
+ case CLKCFG_FSB_1333:
+ return 333333;
+ default:
+ MISSING_CASE(clkcfg);
+ return 133333;
+ }
+ } else {
+ switch (clkcfg) {
+ case CLKCFG_FSB_400_ALT:
+ return 100000;
+ case CLKCFG_FSB_533:
+ return 133333;
+ case CLKCFG_FSB_667:
+ return 166667;
+ case CLKCFG_FSB_800:
+ return 200000;
+ case CLKCFG_FSB_1067_ALT:
+ return 266667;
+ case CLKCFG_FSB_1333_ALT:
+ return 333333;
+ case CLKCFG_FSB_1600_ALT:
+ return 400000;
+ default:
+ return 133333;
+ }
}
}
@@ -2743,8 +2794,8 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
freq = pch_rawclk(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
freq = vlv_hrawclk(dev_priv);
- else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
- freq = g4x_hrawclk(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 3)
+ freq = i9xx_hrawclk(dev_priv);
else
/* no rawclk on other platforms, or no need to know it */
return 0;
@@ -2760,25 +2811,30 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) >= 12) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_ELKHARTLAKE(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_CANNONLAKE(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
dev_priv->cdclk.table = cnl_cdclk_table;
} else if (IS_GEN9_LP(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
@@ -2787,18 +2843,23 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
else
dev_priv->cdclk.table = bxt_cdclk_table;
} else if (IS_GEN9_BC(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
} else if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bdw_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
} else if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = chv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
} else {
+ dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 98ece9cd7cdd..945bb03bdd4d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -714,16 +714,16 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
-static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
+static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
- intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
- intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
/*
* Program the gc max 2 register to clamp values > 1.0.
@@ -731,15 +731,13 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
* from 3.0 to 7.0
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0),
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0),
1 << 16);
- intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1),
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1),
1 << 16);
- intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2),
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2),
1 << 16);
}
-
- intel_dsb_put(dsb);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
@@ -753,7 +751,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@@ -761,7 +759,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
ivb_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
}
}
@@ -776,7 +774,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@@ -784,7 +782,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
bdw_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
}
}
@@ -877,7 +875,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
ilk_load_lut_8(crtc, gamma_lut);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
}
}
@@ -900,14 +898,12 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
- intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
- intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
- intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
- intel_dsb_put(dsb);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
}
static void
@@ -916,7 +912,6 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
- struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
int i;
@@ -927,19 +922,17 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
* 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
* 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
- intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ intel_dsb_reg_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
-
- intel_dsb_put(dsb);
}
static void
@@ -949,7 +942,6 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
- struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
int i;
@@ -963,12 +955,13 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
* seg2[0] being unused by the hardware.
*/
- intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
@@ -986,24 +979,22 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
icl_load_gcmax(crtc_state, entry);
- ivb_load_lut_ext_max(crtc);
- intel_dsb_put(dsb);
+ ivb_load_lut_ext_max(crtc_state);
}
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_dsb *dsb = intel_dsb_get(crtc);
if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
@@ -1018,11 +1009,10 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
break;
default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_ext_max(crtc);
+ ivb_load_lut_ext_max(crtc_state);
}
- intel_dsb_commit(dsb);
- intel_dsb_put(dsb);
+ intel_dsb_commit(crtc_state);
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 9ff05ec12115..eccaa79cb4a9 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -181,11 +181,25 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, CHICKEN_MISC_2, val);
}
+static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
+{
+ /*
+ * Some platforms only expect PHY_MISC to be programmed for PHY-A and
+ * PHY-B and may not even have instances of the register for the
+ * other combo PHY's.
+ */
+ if (IS_ELKHARTLAKE(i915) ||
+ IS_ROCKETLAKE(i915))
+ return phy < PHY_C;
+
+ return true;
+}
+
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
enum phy phy)
{
/* The PHY C added by EHL has no PHY_MISC register */
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ if (!has_phy_misc(dev_priv, phy))
return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
@@ -220,6 +234,27 @@ static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
return false;
}
+static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
+{
+ /*
+ * Certain PHYs are connected to compensation resistors and act
+ * as masters to other PHYs.
+ *
+ * ICL,TGL:
+ * A(master) -> B(slave), C(slave)
+ * RKL:
+ * A(master) -> B(slave)
+ * C(master) -> D(slave)
+ *
+ * We must set the IREFGEN bit for any PHY acting as a master
+ * to another PHY.
+ */
+ if (IS_ROCKETLAKE(dev_priv) && phy == PHY_C)
+ return true;
+
+ return phy == PHY_A;
+}
+
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum phy phy)
{
@@ -229,9 +264,21 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy),
+ ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+ ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
+ ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+ ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
+
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy),
+ DCC_MODE_SELECT_MASK,
+ DCC_MODE_SELECT_CONTINUOSLY);
+ }
+
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- if (phy == PHY_A) {
+ if (phy_is_master(dev_priv, phy)) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
@@ -317,12 +364,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
continue;
}
- /*
- * Although EHL adds a combo PHY C, there's no PHY_MISC
- * register for it and no need to program the
- * DE_IO_COMP_PWR_DOWN setting on PHY C.
- */
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
/*
@@ -345,9 +387,22 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy));
+ val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
+ val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
+ val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
+
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ val &= ~DCC_MODE_SELECT_MASK;
+ val |= DCC_MODE_SELECT_CONTINUOSLY;
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ }
+
cnl_set_procmon_ref_values(dev_priv, phy);
- if (phy == PHY_A) {
+ if (phy_is_master(dev_priv, phy)) {
val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
@@ -376,12 +431,7 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
"Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
- /*
- * Although EHL adds a combo PHY C, there's no PHY_MISC
- * register for it and no need to program the
- * DE_IO_COMP_PWR_DOWN setting on PHY C.
- */
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 2f5b9a4baafd..5b4510ce5693 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -833,7 +833,7 @@ intel_crt_detect(struct drm_connector *connector,
connector->base.id, connector->name,
force);
- if (i915_modparams.load_detect_test) {
+ if (dev_priv->params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
goto load_detect;
@@ -889,7 +889,7 @@ load_detect:
else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
- else if (i915_modparams.load_detect_test)
+ else if (dev_priv->params.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 3112572cfb7d..f22a7645c249 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -40,6 +40,10 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
+#define RKL_CSR_PATH "i915/rkl_dmc_ver2_01.bin"
+#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
+MODULE_FIRMWARE(RKL_CSR_PATH);
+
#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
#define TGL_CSR_MAX_FW_SIZE 0x6000
@@ -682,7 +686,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_csr_runtime_pm_get(dev_priv);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ csr->fw_path = RKL_CSR_PATH;
+ csr->required_version = RKL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ } else if (INTEL_GEN(dev_priv) >= 12) {
csr->fw_path = TGL_CSR_PATH;
csr->required_version = TGL_CSR_VERSION_REQUIRED;
/* Allow to load fw via parameter using the last known size */
@@ -699,7 +707,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
csr->fw_path = GLK_CSR_PATH;
csr->required_version = GLK_CSR_VERSION_REQUIRED;
csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ } else if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
csr->fw_path = KBL_CSR_PATH;
csr->required_version = KBL_CSR_VERSION_REQUIRED;
csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
@@ -713,15 +723,15 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
}
- if (i915_modparams.dmc_firmware_path) {
- if (strlen(i915_modparams.dmc_firmware_path) == 0) {
+ if (dev_priv->params.dmc_firmware_path) {
+ if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
csr->fw_path = NULL;
drm_info(&dev_priv->drm,
"Disabling CSR firmware and runtime PM\n");
return;
}
- csr->fw_path = i915_modparams.dmc_firmware_path;
+ csr->fw_path = dev_priv->params.dmc_firmware_path;
/* Bypass version check for firmware override. */
csr->required_version = 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0575a1eea2a1..2c484b55bcdf 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -639,11 +639,25 @@ struct tgl_dkl_phy_ddi_buf_trans {
static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */
+ { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
{ 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
{ 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
{ 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
{ 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
{ 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
@@ -693,8 +707,10 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] =
};
static const struct ddi_buf_trans *
-bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
return bdw_ddi_translations_edp;
@@ -705,8 +721,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
-skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (IS_SKL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
return skl_y_ddi_translations_dp;
@@ -720,12 +738,18 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
-kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
- if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) {
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
- } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
+ } else if (IS_KBL_ULT(dev_priv) ||
+ IS_CFL_ULT(dev_priv) ||
+ IS_CML_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
return kbl_u_ddi_translations_dp;
} else {
@@ -735,15 +759,21 @@ kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
-skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) ||
+ IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
- } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
- IS_CFL_ULT(dev_priv)) {
+ } else if (IS_SKL_ULT(dev_priv) ||
+ IS_KBL_ULT(dev_priv) ||
+ IS_CFL_ULT(dev_priv) ||
+ IS_CML_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
return skl_u_ddi_translations_edp;
} else {
@@ -752,17 +782,21 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
+ return kbl_get_buf_trans_dp(encoder, n_entries);
else
- return skl_get_buf_trans_dp(dev_priv, n_entries);
+ return skl_get_buf_trans_dp(encoder, n_entries);
}
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) ||
+ IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@@ -781,18 +815,21 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries)
}
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
- enum port port, int *n_entries)
+intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
- kbl_get_buf_trans_dp(dev_priv, n_entries);
- *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ kbl_get_buf_trans_dp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
return ddi_translations;
} else if (IS_SKYLAKE(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_dp(dev_priv, n_entries);
- *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ skl_get_buf_trans_dp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
return ddi_translations;
} else if (IS_BROADWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
@@ -807,16 +844,17 @@ intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
}
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
- enum port port, int *n_entries)
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (IS_GEN9_BC(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_edp(dev_priv, n_entries);
- *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ skl_get_buf_trans_edp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
return ddi_translations;
} else if (IS_BROADWELL(dev_priv)) {
- return bdw_get_buf_trans_edp(dev_priv, n_entries);
+ return bdw_get_buf_trans_edp(encoder, n_entries);
} else if (IS_HASWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
return hsw_ddi_translations_dp;
@@ -843,9 +881,11 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
}
static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
+intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (IS_GEN9_BC(dev_priv)) {
return skl_get_buf_trans_hdmi(dev_priv, n_entries);
} else if (IS_BROADWELL(dev_priv)) {
@@ -861,33 +901,36 @@ intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
}
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
*n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
return bxt_ddi_translations_dp;
}
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
return bxt_ddi_translations_edp;
}
- return bxt_get_buf_trans_dp(dev_priv, n_entries);
+ return bxt_get_buf_trans_dp(encoder, n_entries);
}
static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
{
*n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
return bxt_ddi_translations_hdmi;
}
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
@@ -907,8 +950,9 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
@@ -928,8 +972,9 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (dev_priv->vbt.edp.low_vswing) {
@@ -948,14 +993,16 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
return NULL;
} else {
- return cnl_get_buf_trans_dp(dev_priv, n_entries);
+ return cnl_get_buf_trans_dp(encoder, n_entries);
}
}
static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+icl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (type == INTEL_OUTPUT_HDMI) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
return icl_combo_phy_ddi_translations_hdmi;
@@ -972,7 +1019,7 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
}
static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+icl_get_mg_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI) {
@@ -988,7 +1035,7 @@ icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
}
static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
@@ -996,15 +1043,15 @@ ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return ehl_combo_phy_ddi_translations_dp;
}
- return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+ return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
}
static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
- return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+ return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
} else if (rate > 270000) {
*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
return tgl_combo_phy_ddi_translations_dp_hbr2;
@@ -1014,6 +1061,22 @@ tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return tgl_combo_phy_ddi_translations_dp_hbr;
}
+static const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder, int type, int rate,
+ int *n_entries)
+{
+ if (type == INTEL_OUTPUT_HDMI) {
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ return tgl_dkl_phy_hdmi_ddi_trans;
+ } else if (rate > 270000) {
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2);
+ return tgl_dkl_phy_dp_ddi_trans_hbr2;
+ }
+
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ return tgl_dkl_phy_dp_ddi_trans;
+}
+
static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1022,33 +1085,34 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ tgl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ tgl_get_dkl_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0,
+ &n_entries);
default_entry = n_entries - 1;
} else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ icl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+ icl_get_mg_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0,
&n_entries);
default_entry = n_entries - 1;
} else if (IS_CANNONLAKE(dev_priv)) {
- cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+ cnl_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = n_entries - 1;
} else if (IS_GEN9_LP(dev_priv)) {
- bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+ bxt_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = n_entries - 1;
} else if (IS_GEN9_BC(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = 8;
} else if (IS_BROADWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = 7;
} else if (IS_HASWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
default_entry = 6;
} else {
drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
@@ -1086,10 +1150,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
&n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
+ ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
&n_entries);
else
- ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
+ ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
&n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
@@ -1118,7 +1182,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
- ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -1139,16 +1203,30 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
- i915_reg_t reg = DDI_BUF_CTL(port);
- int i;
+ if (IS_BROXTON(dev_priv)) {
+ udelay(16);
+ return;
+ }
- for (i = 0; i < 16; i++) {
- udelay(1);
- if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
- return;
+ if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE), 8))
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n",
+ port_name(port));
+}
+
+static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ /* Wait > 518 usecs for DDI_BUF_CTL to be non-idle */
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ usleep_range(518, 1000);
+ return;
}
- drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
- port_name(port));
+
+ if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE), 500))
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
+ port_name(port));
}
static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
@@ -1349,10 +1427,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->saved_port_bits |
+ intel_dp->DP = dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
}
@@ -1608,7 +1685,6 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 ctl;
if (INTEL_GEN(dev_priv) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
@@ -1626,10 +1702,9 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
}
- ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ intel_ddi_transcoder_func_reg_val_get(encoder,
+ crtc_state));
}
/*
@@ -2027,9 +2102,8 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
@@ -2042,11 +2116,13 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int n_entries;
if (type == INTEL_OUTPUT_HDMI)
- ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
else if (type == INTEL_OUTPUT_EDP)
- ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+ ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
+ &n_entries);
else
- ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+ ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
+ &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -2062,9 +2138,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
return;
}
- _skl_ddi_set_iboost(dev_priv, port, iboost);
+ _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
- if (port == PORT_A && intel_dig_port->max_lanes == 4)
+ if (encoder->port == PORT_A && dig_port->max_lanes == 4)
_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
@@ -2077,11 +2153,11 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
int n_entries;
if (type == INTEL_OUTPUT_HDMI)
- ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+ ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries);
else if (type == INTEL_OUTPUT_EDP)
- ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries);
+ ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries);
else
- ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
+ ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -2095,45 +2171,46 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
ddi_translations[level].deemphasis);
}
-u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
+static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- tgl_get_combo_buf_trans(dev_priv, encoder->type,
+ tgl_get_combo_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
else
- n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ tgl_get_dkl_buf_trans(encoder, encoder->type,
+ intel_dp->link_rate, &n_entries);
} else if (INTEL_GEN(dev_priv) == 11) {
if (IS_ELKHARTLAKE(dev_priv))
- ehl_get_combo_buf_trans(dev_priv, encoder->type,
+ ehl_get_combo_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
else if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, encoder->type,
+ icl_get_combo_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
else
- icl_get_mg_buf_trans(dev_priv, encoder->type,
+ icl_get_mg_buf_trans(encoder, encoder->type,
intel_dp->link_rate, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
- cnl_get_buf_trans_edp(dev_priv, &n_entries);
+ cnl_get_buf_trans_edp(encoder, &n_entries);
else
- cnl_get_buf_trans_dp(dev_priv, &n_entries);
+ cnl_get_buf_trans_dp(encoder, &n_entries);
} else if (IS_GEN9_LP(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
- bxt_get_buf_trans_edp(dev_priv, &n_entries);
+ bxt_get_buf_trans_edp(encoder, &n_entries);
else
- bxt_get_buf_trans_dp(dev_priv, &n_entries);
+ bxt_get_buf_trans_dp(encoder, &n_entries);
} else {
if (encoder->type == INTEL_OUTPUT_EDP)
- intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+ intel_ddi_get_buf_trans_edp(encoder, &n_entries);
else
- intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+ intel_ddi_get_buf_trans_dp(encoder, &n_entries);
}
if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
@@ -2151,19 +2228,9 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
* used on all DDI platforms. Should that change we need to
* rethink this code.
*/
-u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
-{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
+static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
@@ -2176,11 +2243,11 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
u32 val;
if (type == INTEL_OUTPUT_HDMI)
- ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+ ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries);
else if (type == INTEL_OUTPUT_EDP)
- ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
+ ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries);
else
- ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
+ ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -2297,22 +2364,23 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
}
-static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum phy phy, int type,
- int rate)
+static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
+ u32 level, int type, int rate)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val;
int ln;
if (INTEL_GEN(dev_priv) >= 12)
- ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate,
+ ddi_translations = tgl_get_combo_buf_trans(encoder, type, rate,
&n_entries);
else if (IS_ELKHARTLAKE(dev_priv))
- ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate,
+ ddi_translations = ehl_get_combo_buf_trans(encoder, type, rate,
&n_entries);
else
- ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+ ddi_translations = icl_get_combo_buf_trans(encoder, type, rate,
&n_entries);
if (!ddi_translations)
return;
@@ -2424,7 +2492,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
+ icl_ddi_combo_vswing_program(encoder, level, type, rate);
/* 6. Set training enable to trigger update */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
@@ -2448,7 +2516,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
rate = intel_dp->link_rate;
}
- ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+ ddi_translations = icl_get_mg_buf_trans(encoder, type, rate,
&n_entries);
/* The table does not have values for level 3 and level 9. */
if (level >= n_entries || level == 3 || level == 9) {
@@ -2585,15 +2653,17 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
+ int rate = 0;
if (type == INTEL_OUTPUT_HDMI) {
- n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
- ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
- } else {
- n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
- ddi_translations = tgl_dkl_phy_dp_ddi_trans;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ rate = intel_dp->link_rate;
}
+ ddi_translations = tgl_get_dkl_buf_trans(encoder, encoder->type, rate,
+ &n_entries);
+
if (level >= n_entries)
level = n_entries - 1;
@@ -2964,15 +3034,15 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
static void
-icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
u32 ln0, ln1, pin_assignment;
u8 width;
- if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+ if (dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
if (INTEL_GEN(dev_priv) >= 12) {
@@ -2991,13 +3061,13 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
/* DPPATC */
- pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
+ pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
width = crtc_state->lane_count;
switch (pin_assignment) {
case 0x0:
drm_WARN_ON(&dev_priv->drm,
- intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ dig_port->tc_mode != TC_PORT_LEGACY);
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
@@ -3344,11 +3414,10 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int level = intel_ddi_hdmi_level(encoder);
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
@@ -3375,9 +3444,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
intel_ddi_enable_pipe_clock(encoder, crtc_state);
- intel_dig_port->set_infoframes(encoder,
- crtc_state->has_infoframe,
- crtc_state, conn_state);
+ dig_port->set_infoframes(encoder,
+ crtc_state->has_infoframe,
+ crtc_state, conn_state);
}
static void intel_ddi_pre_enable(struct intel_atomic_state *state,
@@ -3943,10 +4012,9 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->base.port;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
u32 dp_tp_ctl, ddi_buf_ctl;
bool wait = false;
@@ -3985,7 +4053,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
- udelay(600);
+ intel_wait_ddi_buf_active(dev_priv, port);
}
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
@@ -4155,11 +4223,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
- if (INTEL_GEN(dev_priv) >= 12) {
- intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder);
- intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder);
- }
-
intel_dsc_get_config(encoder, pipe_config);
temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4261,6 +4324,16 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder transcoder =
+ intel_dp_mst_is_slave_trans(pipe_config) ?
+ pipe_config->mst_master_transcoder :
+ pipe_config->cpu_transcoder;
+
+ intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
+ intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
+ }
+
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -4496,39 +4569,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
};
static struct intel_connector *
-intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_connector *connector;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
return NULL;
- intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
- intel_dig_port->dp.prepare_link_retrain =
- intel_ddi_prepare_link_retrain;
- intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
- intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+ dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
+ dig_port->dp.set_link_train = intel_ddi_set_link_train;
+ dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
if (INTEL_GEN(dev_priv) >= 12)
- intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
+ dig_port->dp.set_signal_levels = tgl_set_signal_levels;
else if (INTEL_GEN(dev_priv) >= 11)
- intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
+ dig_port->dp.set_signal_levels = icl_set_signal_levels;
else if (IS_CANNONLAKE(dev_priv))
- intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
+ dig_port->dp.set_signal_levels = cnl_set_signal_levels;
else if (IS_GEN9_LP(dev_priv))
- intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
+ dig_port->dp.set_signal_levels = bxt_set_signal_levels;
else
- intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+ dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+
+ dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
+ dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
if (INTEL_GEN(dev_priv) < 12) {
- intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
- intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+ dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+ dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
}
- if (!intel_dp_init_connector(intel_dig_port, connector)) {
+ if (!intel_dp_init_connector(dig_port, connector)) {
kfree(connector);
return NULL;
}
@@ -4727,29 +4802,29 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
}
static struct intel_connector *
-intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
{
struct intel_connector *connector;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
return NULL;
- intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
- intel_hdmi_init_connector(intel_dig_port, connector);
+ dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+ intel_hdmi_init_connector(dig_port, connector);
return connector;
}
-static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
+static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- if (dport->base.port != PORT_A)
+ if (dig_port->base.port != PORT_A)
return false;
- if (dport->saved_port_bits & DDI_A_4_LANES)
+ if (dig_port->saved_port_bits & DDI_A_4_LANES)
return false;
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
@@ -4771,10 +4846,10 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
}
static int
-intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
+intel_ddi_max_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev);
- enum port port = intel_dport->base.port;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
int max_lanes = 4;
if (INTEL_GEN(dev_priv) >= 11)
@@ -4793,10 +4868,10 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
* wasn't lit up at boot. Force this bit set when needed
* so we use the proper lane count for our calculations.
*/
- if (intel_ddi_a_force_4_lanes(intel_dport)) {
+ if (intel_ddi_a_force_4_lanes(dig_port)) {
drm_dbg_kms(&dev_priv->drm,
"Forcing DDI_A_4_LANES for port A\n");
- intel_dport->saved_port_bits |= DDI_A_4_LANES;
+ dig_port->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
@@ -4805,7 +4880,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -4834,11 +4909,11 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
return;
}
- intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
- if (!intel_dig_port)
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
return;
- encoder = &intel_dig_port->base;
+ encoder = &dig_port->base;
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
@@ -4865,49 +4940,49 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
- intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
- DDI_BUF_CTL(port)) &
- DDI_BUF_PORT_REVERSAL;
+ dig_port->saved_port_bits =
+ intel_de_read(dev_priv, DDI_BUF_CTL(port))
+ & DDI_BUF_PORT_REVERSAL;
else
- intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
- DDI_BUF_CTL(port)) &
- (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+ dig_port->saved_port_bits =
+ intel_de_read(dev_priv, DDI_BUF_CTL(port))
+ & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
- intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
- intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
- intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy =
!intel_bios_port_supports_typec_usb(dev_priv, port) &&
!intel_bios_port_supports_tbt(dev_priv, port);
- intel_tc_port_init(intel_dig_port, is_legacy);
+ intel_tc_port_init(dig_port, is_legacy);
encoder->update_prepare = intel_ddi_update_prepare;
encoder->update_complete = intel_ddi_update_complete;
}
drm_WARN_ON(&dev_priv->drm, port > PORT_I);
- intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
+ dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
port - PORT_A;
if (init_dp) {
- if (!intel_ddi_init_dp_connector(intel_dig_port))
+ if (!intel_ddi_init_dp_connector(dig_port))
goto err;
- intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
}
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+ if (!intel_ddi_init_hdmi_connector(dig_port))
goto err;
}
if (init_lspcon) {
- if (lspcon_init(intel_dig_port))
+ if (lspcon_init(dig_port))
/* TODO: handle hdmi info frame part */
drm_dbg_kms(&dev_priv->drm,
"LSPCON init success on port %c\n",
@@ -4924,26 +4999,26 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (INTEL_GEN(dev_priv) >= 11) {
if (intel_phy_is_tc(dev_priv, phy))
- intel_dig_port->connected = intel_tc_port_connected;
+ dig_port->connected = intel_tc_port_connected;
else
- intel_dig_port->connected = lpt_digital_port_connected;
+ dig_port->connected = lpt_digital_port_connected;
} else if (INTEL_GEN(dev_priv) >= 8) {
if (port == PORT_A || IS_GEN9_LP(dev_priv))
- intel_dig_port->connected = bdw_digital_port_connected;
+ dig_port->connected = bdw_digital_port_connected;
else
- intel_dig_port->connected = lpt_digital_port_connected;
+ dig_port->connected = lpt_digital_port_connected;
} else {
if (port == PORT_A)
- intel_dig_port->connected = hsw_digital_port_connected;
+ dig_port->connected = hsw_digital_port_connected;
else
- intel_dig_port->connected = lpt_digital_port_connected;
+ dig_port->connected = lpt_digital_port_connected;
}
- intel_infoframe_init(intel_dig_port);
+ intel_infoframe_init(dig_port);
return;
err:
drm_encoder_cleanup(&encoder->base);
- kfree(intel_dig_port);
+ kfree(dig_port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index fbdf8ddde486..077e9dbbe367 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -42,9 +42,6 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
u32 ddi_signal_levels(struct intel_dp *intel_dp);
-u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
-u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
- u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 26996e1839e2..729ec6e0d43a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -35,6 +35,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
@@ -1611,13 +1612,13 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport,
+ struct intel_digital_port *dig_port,
unsigned int expected_mask)
{
u32 port_mask;
i915_reg_t dpll_reg;
- switch (dport->base.port) {
+ switch (dig_port->base.port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
@@ -1639,7 +1640,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
port_mask, expected_mask, 1000))
drm_WARN(&dev_priv->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
- dport->base.base.base.id, dport->base.base.name,
+ dig_port->base.base.base.id, dig_port->base.base.name,
intel_de_read(dev_priv, dpll_reg) & port_mask,
expected_mask);
}
@@ -4823,11 +4824,18 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
- if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
+ switch (plane_state->hw.color_encoding) {
+ case DRM_COLOR_YCBCR_BT709:
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
- else
- plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
-
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
+ break;
+ default:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
+ }
if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
@@ -4890,7 +4898,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
int ret;
/* reset doesn't touch the display */
- if (!i915_modparams.force_reset_modeset_test &&
+ if (!dev_priv->params.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
@@ -6435,8 +6443,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* We can't read out IPS on broadwell, assume the worst and
* forcibly enable IPS on the first fastset.
*/
- if (new_crtc_state->update_pipe &&
- old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+ if (new_crtc_state->update_pipe && old_crtc_state->inherited)
return true;
return !old_crtc_state->ips_enabled;
@@ -7223,30 +7230,33 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
return false;
-
- if (IS_ELKHARTLAKE(dev_priv))
+ else if (IS_ROCKETLAKE(dev_priv))
+ return phy <= PHY_D;
+ else if (IS_ELKHARTLAKE(dev_priv))
return phy <= PHY_C;
-
- if (INTEL_GEN(dev_priv) >= 11)
+ else if (INTEL_GEN(dev_priv) >= 11)
return phy <= PHY_B;
-
- return false;
+ else
+ return false;
}
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (INTEL_GEN(dev_priv) >= 12)
+ if (IS_ROCKETLAKE(dev_priv))
+ return false;
+ else if (INTEL_GEN(dev_priv) >= 12)
return phy >= PHY_D && phy <= PHY_I;
-
- if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
+ else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
-
- return false;
+ else
+ return false;
}
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
- if (IS_ELKHARTLAKE(i915) && port == PORT_D)
+ if (IS_ROCKETLAKE(i915) && port >= PORT_D)
+ return (enum phy)port - 1;
+ else if (IS_ELKHARTLAKE(i915) && port == PORT_D)
return PHY_A;
return (enum phy)port;
@@ -7517,6 +7527,10 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_crtc_vblank_on(new_crtc_state);
intel_encoders_enable(state, crtc);
+
+ /* prevents spurious underruns */
+ if (IS_GEN(dev_priv, 2))
+ intel_wait_for_vblank(dev_priv, pipe);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
@@ -7590,6 +7604,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
to_intel_bw_state(dev_priv->bw_obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(dev_priv->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
@@ -7663,6 +7679,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
cdclk_state->min_voltage_level[pipe] = 0;
cdclk_state->active_pipes &= ~BIT(pipe);
+ dbuf_state->active_pipes &= ~BIT(pipe);
+
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
}
@@ -7880,7 +7898,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!i915_modparams.enable_ips)
+ if (!dev_priv->params.enable_ips)
return false;
if (crtc_state->pipe_bpp > 24)
@@ -8151,8 +8169,8 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- if (i915_modparams.panel_use_ssc >= 0)
- return i915_modparams.panel_use_ssc != 0;
+ if (dev_priv->params.panel_use_ssc >= 0)
+ return dev_priv->params.panel_use_ssc != 0;
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
@@ -8897,7 +8915,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
- mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
}
@@ -10056,7 +10073,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
- if (crtc_state->limited_color_range)
+ if (crtc_state->limited_color_range &&
+ !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= PIPECONF_COLOR_RANGE_SELECT;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -10894,7 +10912,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- unsigned long panel_transcoder_mask = 0;
+ unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
intel_wakeref_t wf;
@@ -10904,9 +10922,6 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
- panel_transcoder_mask |= BIT(TRANSCODER_EDP);
-
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
* and DSI transcoders handled below.
@@ -10917,9 +10932,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_set_bit(panel_transcoder,
- &panel_transcoder_mask,
- ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
+ for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
+ panel_transcoder_mask) {
bool force_thru = false;
enum pipe trans_pipe;
@@ -12511,7 +12525,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
continue;
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
- if (!icl_is_nv12_y_plane(linked->id))
+ if (!icl_is_nv12_y_plane(dev_priv, linked->id))
continue;
if (crtc_state->active_planes & BIT(linked->id))
@@ -12557,6 +12571,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
else if (linked->id == PLANE_SPRITE4)
plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+ else if (linked->id == PLANE_SPRITE3)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
+ else if (linked->id == PLANE_SPRITE2)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
else
MISSING_CASE(linked->id);
}
@@ -12580,12 +12598,15 @@ static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ int linetime_wm;
if (!crtc_state->hw.enable)
return 0;
- return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- adjusted_mode->crtc_clock);
+ linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ adjusted_mode->crtc_clock);
+
+ return min(linetime_wm, 0x1ff);
}
static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
@@ -12593,12 +12614,15 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ int linetime_wm;
if (!crtc_state->hw.enable)
return 0;
- return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- cdclk_state->logical.cdclk);
+ linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ cdclk_state->logical.cdclk);
+
+ return min(linetime_wm, 0x1ff);
}
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
@@ -12607,7 +12631,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u16 linetime_wm;
+ int linetime_wm;
if (!crtc_state->hw.enable)
return 0;
@@ -12619,7 +12643,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
linetime_wm /= 2;
- return linetime_wm;
+ return min(linetime_wm, 0x1ff);
}
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
@@ -13582,8 +13606,8 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
static bool fastboot_enabled(struct drm_i915_private *dev_priv)
{
- if (i915_modparams.fastboot != -1)
- return i915_modparams.fastboot;
+ if (dev_priv->params.fastboot != -1)
+ return dev_priv->params.fastboot;
/* Enable fastboot by default on Skylake and newer */
if (INTEL_GEN(dev_priv) >= 9)
@@ -13607,8 +13631,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bool ret = true;
u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
- (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
- !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
+ current_config->inherited && !pipe_config->inherited;
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
@@ -14019,10 +14042,10 @@ static void verify_wm_state(struct intel_crtc *crtc,
hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 &&
- hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
+ hw_enabled_slices != dev_priv->dbuf.enabled_slices)
drm_err(&dev_priv->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- dev_priv->enabled_dbuf_slices_mask,
+ dev_priv->dbuf.enabled_slices,
hw_enabled_slices);
/* planes */
@@ -14416,6 +14439,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
+ crtc->mode_flags = crtc_state->mode_flags;
+
/*
* The scanline counter increments at the leading edge of hsync.
*
@@ -14563,20 +14588,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
state->modeset = true;
state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
- state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
-
- if (state->active_pipe_changes) {
+ if (state->active_pipes != dev_priv->active_pipes) {
ret = _intel_atomic_lock_global_state(state);
if (ret)
return ret;
}
- ret = intel_modeset_calc_cdclk(state);
- if (ret)
- return ret;
-
- intel_modeset_clear_plls(state);
-
if (IS_HASWELL(dev_priv))
return hsw_mode_set_planes_workaround(state);
@@ -14653,11 +14670,10 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
/* See {hsw,vlv,ivb}_plane_ratio() */
return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_IVYBRIDGE(dev_priv);
+ IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
}
-static int intel_atomic_check_planes(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
+static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -14699,7 +14715,13 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- if (hweight8(old_active_planes) == hweight8(new_active_planes))
+ /*
+ * Not only the number of planes, but a change in the plane
+ * configuration itself may already mean we need to recompute the
+ * min CDCLK, because different planes may consume different
+ * amounts of DBuf bandwidth according to the formula:
+ * BW per plane = pixel rate * bpp * pipe/plane scale factor
+ */
+ if (old_active_planes == new_active_planes)
continue;
ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
@@ -14707,6 +14729,21 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
return ret;
}
+ return 0;
+}
+
+static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_cdclk_state *new_cdclk_state;
+ struct intel_plane_state *plane_state;
+ struct intel_bw_state *new_bw_state;
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+ enum pipe pipe;
+ int ret;
+ int i;
/*
* active_planes bitmask has been updated, and potentially
* affected planes are part of the state. We can now
@@ -14718,6 +14755,30 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
return ret;
}
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+
+ if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
+ *need_cdclk_calc = true;
+
+ ret = dev_priv->display.bw_calc_min_cdclk(state);
+ if (ret)
+ return ret;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+
+ if (!new_cdclk_state || !new_bw_state)
+ return 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
+
+ /*
+ * Currently do this change only if we need to increase cdclk
+ */
+ if (new_bw_state->min_cdclk > min_cdclk)
+ *need_cdclk_calc = true;
+ }
+
return 0;
}
@@ -14769,16 +14830,13 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_cdclk_state *new_cdclk_state;
struct intel_crtc *crtc;
int ret, i;
bool any_ms = false;
- /* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->uapi.mode.private_flags !=
- old_crtc_state->uapi.mode.private_flags)
+ if (new_crtc_state->inherited != old_crtc_state->inherited)
new_crtc_state->uapi.mode_changed = true;
}
@@ -14880,14 +14938,10 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_atomic_check_planes(state, &any_ms);
+ ret = intel_atomic_check_planes(state);
if (ret)
goto fail;
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
- if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
- any_ms = true;
-
/*
* distrust_bios_wm will force a full dbuf recomputation
* but the hardware state will only get updated accordingly
@@ -14908,10 +14962,6 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = intel_atomic_check_crtcs(state);
- if (ret)
- goto fail;
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -14921,6 +14971,22 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ ret = intel_atomic_check_cdclk(state, &any_ms);
+ if (ret)
+ goto fail;
+
+ if (any_ms) {
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+
+ intel_modeset_clear_plls(state);
+ }
+
+ ret = intel_atomic_check_crtcs(state);
+ if (ret)
+ goto fail;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!needs_modeset(new_crtc_state) &&
@@ -14951,8 +15017,24 @@ static int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
- return drm_atomic_helper_prepare_planes(state->base.dev,
- &state->base);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
+ if (ret < 0)
+ return ret;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ bool mode_changed = needs_modeset(crtc_state);
+
+ if (mode_changed || crtc_state->update_pipe ||
+ crtc_state->uapi.color_mgmt_changed) {
+ intel_dsb_prepare(crtc_state);
+ }
+ }
+
+ return 0;
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@@ -15124,7 +15206,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
* of enabling them on the CRTC's first fastset.
*/
if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->inherited)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -15216,29 +15298,6 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
}
}
-static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
- u8 required_slices = state->enabled_dbuf_slices_mask;
- u8 slices_union = hw_enabled_slices | required_slices;
-
- /* If 2nd DBuf slice required, enable it here */
- if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, slices_union);
-}
-
-static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
- u8 required_slices = state->enabled_dbuf_slices_mask;
-
- /* If 2nd DBuf slice is no more required disable it */
- if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, required_slices);
-}
-
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -15405,15 +15464,27 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
&wait_reset);
}
+static void intel_cleanup_dsbs(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i)
+ intel_dsb_cleanup(old_crtc_state);
+}
+
static void intel_atomic_cleanup_work(struct work_struct *work)
{
- struct drm_atomic_state *state =
- container_of(work, struct drm_atomic_state, commit_work);
- struct drm_i915_private *i915 = to_i915(state->dev);
+ struct intel_atomic_state *state =
+ container_of(work, struct intel_atomic_state, base.commit_work);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
- drm_atomic_helper_cleanup_planes(&i915->drm, state);
- drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_put(state);
+ intel_cleanup_dsbs(state);
+ drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
+ drm_atomic_helper_commit_cleanup_done(&state->base);
+ drm_atomic_state_put(&state->base);
intel_atomic_helper_free_state(i915);
}
@@ -15479,9 +15550,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_encoders_update_prepare(state);
- /* Enable all new slices, we might need */
- if (state->modeset)
- icl_dbuf_slice_pre_update(state);
+ intel_dbuf_pre_plane_update(state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.commit_modeset_enables(state);
@@ -15536,9 +15605,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
dev_priv->display.optimize_watermarks(state, crtc);
}
- /* Disable all slices, we don't need */
- if (state->modeset)
- icl_dbuf_slice_post_update(state);
+ intel_dbuf_post_plane_update(state);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@@ -15547,6 +15614,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
modeset_put_power_domains(dev_priv, put_domains[i]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
+
+ /*
+ * DSB cleanup is done in cleanup_work, aligning with framebuffer
+ * cleanup. So copy and reset the dsb structure to sync with
+ * commit_done, and do the dsb cleanup later in cleanup_work.
+ */
+ old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
}
/* Underruns don't always raise interrupts, so check manually */
@@ -15696,8 +15770,15 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_atomic_swap_global_state(state);
if (ret) {
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
i915_sw_fence_commit(&state->commit_ready);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_dsb_cleanup(new_crtc_state);
+
drm_atomic_helper_cleanup_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@@ -16252,7 +16333,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
* port is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+ INTEL_NUM_PIPES(dev_priv) == 2)
plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
plane->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -16417,6 +16499,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+ if (INTEL_GEN(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&cursor->base);
+
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
@@ -16757,7 +16842,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_D); /* DDI TC1 */
+ intel_ddi_init(dev_priv, PORT_E); /* DDI TC2 */
+ } else if (INTEL_GEN(dev_priv) >= 12) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_D);
@@ -17432,23 +17522,35 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
intel_update_cdclk(i915);
intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+
+ dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
struct drm_plane *plane;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
- drm_for_each_crtc(crtc, state->dev) {
- struct drm_crtc_state *crtc_state;
+ for_each_intel_crtc(state->dev, crtc) {
+ struct intel_crtc_state *crtc_state;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
+
+ if (crtc_state->hw.active) {
+ /*
+ * Preserve the inherited flag to avoid
+ * taking the full modeset path.
+ */
+ crtc_state->inherited = true;
+ }
}
drm_for_each_plane(plane, state->dev) {
@@ -17590,6 +17692,15 @@ retry:
}
if (crtc_state->hw.active) {
+ /*
+ * We've not yet detected sink capabilities
+ * (audio, infoframes, etc.) and thus we don't want to
+ * force a full state recomputation yet. We want that to
+ * happen only for the first real commit from userspace.
+ * So preserve the inherited flag for the time being.
+ */
+ crtc_state->inherited = true;
+
ret = drm_atomic_add_affected_planes(state, &crtc->base);
if (ret)
goto out;
@@ -17677,7 +17788,8 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
if (IS_I845G(i915) || IS_I865G(i915)) {
mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
mode_config->cursor_height = 1023;
- } else if (IS_GEN(i915, 2)) {
+ } else if (IS_I830(i915) || IS_I85X(i915) ||
+ IS_I915G(i915) || IS_I915GM(i915)) {
mode_config->cursor_width = 64;
mode_config->cursor_height = 64;
} else {
@@ -17723,6 +17835,10 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
if (ret)
return ret;
+ ret = intel_dbuf_init(i915);
+ if (ret)
+ return ret;
+
ret = intel_bw_init(i915);
if (ret)
return ret;
@@ -18239,6 +18355,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(dev_priv->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(dev_priv->dbuf.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -18269,7 +18387,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
enableddisabled(crtc_state->hw.active));
}
- dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
+ dev_priv->active_pipes = cdclk_state->active_pipes =
+ dbuf_state->active_pipes = active_pipes;
readout_plane_state(dev_priv);
@@ -18360,7 +18479,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* set a flag to indicate that a full recalculation is
* needed on the next commit.
*/
- mode->private_flags = I915_MODE_FLAG_INHERITED;
+ crtc_state->inherited = true;
intel_crtc_compute_pixel_rate(crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 3a06f72c9859..e890c8fb779b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -187,6 +187,13 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+#define for_each_dbuf_slice_in_mask(__slice, __mask) \
+ for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
+ for_each_if((BIT(__slice)) & (__mask))
+
+#define for_each_dbuf_slice(__slice) \
+ for_each_dbuf_slice_in_mask(__slice, BIT(I915_MAX_DBUF_SLICES) - 1)
+
enum port {
PORT_NONE = -1,
@@ -535,7 +542,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport,
+ struct intel_digital_port *dig_port,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
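
The intel_display.h hunk above adds for_each_dbuf_slice_in_mask()/for_each_dbuf_slice() for walking the DBuf slices selected by a bitmask. As a minimal standalone sketch (plain userspace C with BIT() and the slice enum re-declared locally; illustrative only, not part of the patch), the mask iteration the macro performs behaves like this:

	#include <stdio.h>

	#define BIT(n) (1u << (n))

	enum dbuf_slice { DBUF_S1, DBUF_S2, I915_MAX_DBUF_SLICES };

	int main(void)
	{
		unsigned int mask = BIT(DBUF_S2);	/* pretend only the second slice is enabled */
		enum dbuf_slice slice;

		/* open-coded equivalent of for_each_dbuf_slice_in_mask(slice, mask) */
		for (slice = DBUF_S1; slice < I915_MAX_DBUF_SLICES; slice++) {
			if (!(BIT(slice) & mask))
				continue;
			printf("slice %d is in the mask\n", slice);
		}
		return 0;
	}
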
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 70525623bcdf..3644752cc5ec 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -125,7 +125,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(i915_modparams.enable_ips));
+ yesno(dev_priv->params.enable_ips));
if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
@@ -1099,10 +1099,10 @@ static void drrs_status_per_crtc(struct seq_file *m,
seq_puts(m, "\n\t\t");
if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
- vrefresh = panel->fixed_mode->vrefresh;
+ vrefresh = drm_mode_vrefresh(panel->fixed_mode);
} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
- vrefresh = panel->downclock_mode->vrefresh;
+ vrefresh = drm_mode_vrefresh(panel->downclock_mode);
} else {
seq_printf(m, "DRRS_State: Unknown(%d)\n",
drrs->refresh_rate_type);
@@ -1194,7 +1194,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
@@ -1207,14 +1207,14 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
continue;
- intel_dig_port = enc_to_dig_port(intel_encoder);
- if (!intel_dig_port->dp.can_mst)
+ dig_port = enc_to_dig_port(intel_encoder);
+ if (!dig_port->dp.can_mst)
continue;
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -2218,7 +2218,8 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
}
if (INTEL_GEN(dev_priv) >= 10 &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ !to_intel_connector(connector)->mst_port) ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP))
debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
connector, &i915_dsc_fec_support_fops);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 49998906cc61..0c713e83274d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1161,7 +1161,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
+ u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
drm_WARN(&dev_priv->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
@@ -1817,8 +1817,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+ enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
@@ -1943,22 +1943,29 @@ static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
- return !WARN_ON(power_domains->async_put_domains[0] &
- power_domains->async_put_domains[1]);
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+ return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
+ power_domains->async_put_domains[1]);
}
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
- err |= WARN_ON(!!power_domains->async_put_wakeref !=
- !!__async_put_domains_mask(power_domains));
+ err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
+ !!__async_put_domains_mask(power_domains));
for_each_power_domain(domain, __async_put_domains_mask(power_domains))
- err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
+ err |= drm_WARN_ON(&i915->drm,
+ power_domains->domain_use_count[domain] != 1);
return !err;
}
@@ -2200,11 +2207,14 @@ static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref)
{
- WARN_ON(power_domains->async_put_wakeref);
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+ drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
- WARN_ON(!queue_delayed_work(system_unbound_wq,
- &power_domains->async_put_work,
- msecs_to_jiffies(100)));
+ drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(100)));
}
static void
@@ -2913,6 +2923,53 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
+#define RKL_PW_4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define RKL_PW_3_POWER_DOMAINS ( \
+ RKL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+/*
+ * There is no PW_2/PG_2 on RKL.
+ *
+ * RKL PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (note: registers are in PW0)
+ * - PIPE_A and its planes and VDSC/joining, except VGA
+ * - transcoder A
+ * - DDI_A and DDI_B
+ * - FBC
+ *
+ * RKL PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - central power except FBC
+ * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ RKL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -4283,6 +4340,140 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
};
+static const struct i915_power_well_desc rkl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_DC_OFF,
+ },
+ {
+ .name = "power well 3",
+ .domains = RKL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = ICL_DISP_PW_3,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = RKL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ }
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ }
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ }
+ },
+ {
+ .name = "DDI D TC1 IO",
+ .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
+ },
+ },
+ {
+ .name = "DDI E TC2 IO",
+ .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX D TC1",
+ .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+ },
+ },
+ {
+ .name = "AUX E TC2",
+ .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
+ .ops = &icl_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
+ },
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -4322,7 +4513,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
mask = 0;
}
- if (!i915_modparams.disable_power_well)
+ if (!dev_priv->params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -4365,6 +4556,9 @@ __set_power_wells(struct i915_power_domains *power_domains,
const struct i915_power_well_desc *power_well_descs,
int power_well_count)
{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
u64 power_well_ids = 0;
int i;
@@ -4384,8 +4578,8 @@ __set_power_wells(struct i915_power_domains *power_domains,
if (id == DISP_PW_ID_NONE)
continue;
- WARN_ON(id >= sizeof(power_well_ids) * 8);
- WARN_ON(power_well_ids & BIT_ULL(id));
+ drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
+ drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
power_well_ids |= BIT_ULL(id);
}
@@ -4408,11 +4602,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int err;
- i915_modparams.disable_power_well =
+ dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
- i915_modparams.disable_power_well);
+ dev_priv->params.disable_power_well);
dev_priv->csr.allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+ get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
dev_priv->csr.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
@@ -4428,7 +4622,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_GEN(dev_priv, 12)) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ err = set_power_wells(power_domains, rkl_power_wells);
+ } else if (IS_GEN(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
} else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
@@ -4491,45 +4687,38 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
-static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
- i915_reg_t reg, bool enable)
+static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ enum dbuf_slice slice, bool enable)
{
- u32 val, status;
+ i915_reg_t reg = DBUF_CTL_S(slice);
+ bool state;
+ u32 val;
val = intel_de_read(dev_priv, reg);
- val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+ if (enable)
+ val |= DBUF_POWER_REQUEST;
+ else
+ val &= ~DBUF_POWER_REQUEST;
intel_de_write(dev_priv, reg, val);
intel_de_posting_read(dev_priv, reg);
udelay(10);
- status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
- if ((enable && !status) || (!enable && status)) {
- drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
- enable ? "enable" : "disable");
- return false;
- }
- return true;
+ state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
+ drm_WARN(&dev_priv->drm, enable != state,
+ "DBuf slice %d power %s timeout!\n",
+ slice, enable ? "enable" : "disable");
}
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices)
{
- icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
-}
-
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
-{
- icl_dbuf_slices_update(dev_priv, 0);
-}
-
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices)
-{
- int i;
- int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ enum dbuf_slice slice;
- drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
- "Invalid number of dbuf slices requested\n");
+ drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
+ "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
+ req_slices, num_slices);
drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
@@ -4543,36 +4732,36 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
*/
mutex_lock(&power_domains->lock);
- for (i = 0; i < max_slices; i++) {
- intel_dbuf_slice_set(dev_priv,
- DBUF_CTL_S(i),
- (req_slices & BIT(i)) != 0);
- }
+ for (slice = DBUF_S1; slice < num_slices; slice++)
+ gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
- dev_priv->enabled_dbuf_slices_mask = req_slices;
+ dev_priv->dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
-static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
- skl_ddb_get_hw_state(dev_priv);
+ dev_priv->dbuf.enabled_slices =
+ intel_enabled_dbuf_slices_mask(dev_priv);
+
/*
* Just power up at least 1 slice; we will
* figure out later which slices we have and what we need.
*/
- icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
- BIT(DBUF_S1));
+ gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
+ dev_priv->dbuf.enabled_slices);
}
-static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
- icl_dbuf_slices_update(dev_priv, 0);
+ gen9_dbuf_slices_update(dev_priv, 0);
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
- u32 mask, val;
+ unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
+ u32 mask, val, i;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
@@ -4583,11 +4772,16 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
MBUS_ABOX_B_CREDIT(1) |
MBUS_ABOX_BW_CREDIT(1);
- intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
- if (INTEL_GEN(dev_priv) >= 12) {
- intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
- intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
- }
+ /*
+ * gen12 platforms that use abox1 and abox2 for pixel data reads still
+ * expect us to program the abox_ctl0 register as well, even though
+ * we don't have to program other instance-0 registers like BW_BUDDY.
+ */
+ if (IS_GEN(dev_priv, 12))
+ abox_regs |= BIT(0);
+
+ for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
+ intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
@@ -5066,7 +5260,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
enum intel_dram_type type = dev_priv->dram_info.type;
u8 num_channels = dev_priv->dram_info.num_channels;
const struct buddy_page_mask *table;
- int i;
+ unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
+ int config, i;
if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108: tgl */
@@ -5074,29 +5269,27 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
else
table = tgl_buddy_page_masks;
- for (i = 0; table[i].page_mask != 0; i++)
- if (table[i].num_channels == num_channels &&
- table[i].type == type)
+ for (config = 0; table[config].page_mask != 0; config++)
+ if (table[config].num_channels == num_channels &&
+ table[config].type == type)
break;
- if (table[i].page_mask == 0) {
+ if (table[config].page_mask == 0) {
drm_dbg(&dev_priv->drm,
"Unknown memory configuration; disabling address buddy logic.\n");
- intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
- intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+ for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
+ intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+ BW_BUDDY_DISABLE);
} else {
- intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
- table[i].page_mask);
- intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
- table[i].page_mask);
-
- /* Wa_22010178259:tgl */
- intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
- BW_BUDDY_TLB_REQ_TIMER_MASK,
- REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
- intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
- BW_BUDDY_TLB_REQ_TIMER_MASK,
- REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
+ for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
+ intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+ table[config].page_mask);
+
+ /* Wa_22010178259:tgl,rkl */
+ intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ BW_BUDDY_TLB_REQ_TIMER(0x8));
+ }
}
}
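
tgl_bw_buddy_init() above picks the buddy page mask by scanning a sentinel-terminated table (an entry with page_mask == 0 ends the scan) keyed on DRAM channel count and type, then programs every ABOX instance in abox_mask. A standalone sketch of just the table lookup follows; the table contents are placeholder assumptions for illustration, not the real TGL/RKL values:

	#include <stdio.h>

	struct buddy_page_mask {
		int num_channels;
		int type;
		unsigned int page_mask;
	};

	/* Placeholder entries purely for illustration. */
	static const struct buddy_page_mask table[] = {
		{ .num_channels = 1, .type = 0, .page_mask = 0xF },
		{ .num_channels = 2, .type = 0, .page_mask = 0x1C },
		{}	/* page_mask == 0 terminates the table */
	};

	int main(void)
	{
		int num_channels = 2, type = 0, config;

		for (config = 0; table[config].page_mask != 0; config++)
			if (table[config].num_channels == num_channels &&
			    table[config].type == type)
				break;

		if (table[config].page_mask == 0)
			printf("Unknown memory configuration; would disable buddy logic\n");
		else
			printf("page_mask 0x%x for this configuration\n",
			       table[config].page_mask);
		return 0;
	}
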
@@ -5105,6 +5298,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
+ u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -5127,7 +5321,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_cdclk_init_hw(dev_priv);
/* 5. Enable DBUF. */
- icl_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(dev_priv);
/* 6. Setup MBUS. */
icl_mbus_init(dev_priv);
@@ -5138,6 +5332,13 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
+
+ /* Wa_14011508470 */
+ if (IS_GEN(dev_priv, 12)) {
+ val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
+ DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
+ intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
+ }
}
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -5150,7 +5351,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
- icl_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(dev_priv);
@@ -5375,7 +5576,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915_modparams.disable_power_well)
+ if (!i915->params.disable_power_well)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(i915);
@@ -5399,7 +5600,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
fetch_and_zero(&i915->power_domains.wakeref);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915_modparams.disable_power_well)
+ if (!i915->params.disable_power_well)
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
intel_display_power_flush_work_sync(i915);
@@ -5488,7 +5689,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915_modparams.disable_power_well)
+ if (!i915->params.disable_power_well)
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
intel_display_power_flush_work(i915);
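
The renamed gen9_dbuf_slice_set()/gen9_dbuf_slices_update() helpers in this file follow a request/acknowledge pattern: set or clear DBUF_POWER_REQUEST, wait ~10us, then verify that DBUF_POWER_STATE matches the request and warn on a mismatch. A hardware-free sketch of that pattern (reg_read()/reg_write() are hypothetical stand-ins for intel_de_read()/intel_de_write(), and the fake "hardware" acknowledges immediately):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DBUF_POWER_REQUEST (1u << 31)
	#define DBUF_POWER_STATE   (1u << 30)

	/* Fake per-slice register file standing in for the real MMIO accessors. */
	static uint32_t dbuf_ctl[2];

	static uint32_t reg_read(int slice) { return dbuf_ctl[slice]; }

	static void reg_write(int slice, uint32_t val)
	{
		dbuf_ctl[slice] = val;
		/* pretend the hardware acknowledges the request immediately */
		if (val & DBUF_POWER_REQUEST)
			dbuf_ctl[slice] |= DBUF_POWER_STATE;
		else
			dbuf_ctl[slice] &= ~DBUF_POWER_STATE;
	}

	static bool dbuf_slice_set(int slice, bool enable)
	{
		uint32_t val = reg_read(slice);

		if (enable)
			val |= DBUF_POWER_REQUEST;
		else
			val &= ~DBUF_POWER_REQUEST;
		reg_write(slice, val);

		/* the driver waits ~10us here before sampling the STATE bit */
		return !!(reg_read(slice) & DBUF_POWER_STATE) == enable;
	}

	int main(void)
	{
		printf("enable slice 0:  %s\n", dbuf_slice_set(0, true) ? "ok" : "timeout");
		printf("disable slice 0: %s\n", dbuf_slice_set(0, false) ? "ok" : "timeout");
		return 0;
	}
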
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 6c917699293b..54c20c76057e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -314,15 +314,16 @@ intel_display_power_put_async(struct drm_i915_private *i915,
enum dbuf_slice {
DBUF_S1,
DBUF_S2,
+ I915_MAX_DBUF_SLICES
};
+void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices);
+
#define with_intel_display_power(i915, domain, wf) \
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices);
-
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 2bf3d4cb4ea9..e8f809161c75 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -279,10 +279,10 @@ enum check_link_response {
*/
struct intel_hdcp_shim {
/* Outputs the transmitter's An and Aksv values to the receiver. */
- int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
+ int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an);
/* Reads the receiver's key selection vector */
- int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
+ int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv);
/*
* Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
@@ -290,52 +290,52 @@ struct intel_hdcp_shim {
* different. Call it BSTATUS since that's the name the HDMI spec
* uses and it was there first.
*/
- int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
+ int (*read_bstatus)(struct intel_digital_port *dig_port,
u8 *bstatus);
/* Determines whether a repeater is present downstream */
- int (*repeater_present)(struct intel_digital_port *intel_dig_port,
+ int (*repeater_present)(struct intel_digital_port *dig_port,
bool *repeater_present);
/* Reads the receiver's Ri' value */
- int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
+ int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri);
/* Determines if the receiver's KSV FIFO is ready for consumption */
- int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
+ int (*read_ksv_ready)(struct intel_digital_port *dig_port,
bool *ksv_ready);
/* Reads the ksv fifo for num_downstream devices */
- int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
+ int (*read_ksv_fifo)(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo);
/* Reads a 32-bit part of V' from the receiver */
- int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
+ int (*read_v_prime_part)(struct intel_digital_port *dig_port,
int i, u32 *part);
/* Enables HDCP signalling on the port */
- int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
+ int (*toggle_signalling)(struct intel_digital_port *dig_port,
bool enable);
/* Ensures the link is still protected */
- bool (*check_link)(struct intel_digital_port *intel_dig_port);
+ bool (*check_link)(struct intel_digital_port *dig_port);
/* Detects panel's hdcp capability. This is optional for HDMI. */
- int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
+ int (*hdcp_capable)(struct intel_digital_port *dig_port,
bool *hdcp_capable);
/* HDCP adaptation(DP/HDMI) required on the port */
enum hdcp_wired_protocol protocol;
/* Detects whether sink is HDCP2.2 capable */
- int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
+ int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port,
bool *capable);
/* Write HDCP2.2 messages */
- int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ int (*write_2_2_msg)(struct intel_digital_port *dig_port,
void *buf, size_t size);
/* Read HDCP2.2 messages */
- int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ int (*read_2_2_msg)(struct intel_digital_port *dig_port,
u8 msg_id, void *buf, size_t size);
/*
@@ -343,11 +343,11 @@ struct intel_hdcp_shim {
* type to Receivers. In DP HDCP2.2 Stream type is one of the input to
* the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
*/
- int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
+ int (*config_stream_type)(struct intel_digital_port *dig_port,
bool is_repeater, u8 type);
/* HDCP2.2 Link Integrity Check */
- int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
+ int (*check_2_2_link)(struct intel_digital_port *dig_port);
};
struct intel_hdcp {
@@ -479,16 +479,6 @@ struct intel_atomic_state {
bool dpll_set, modeset;
- /*
- * Does this transaction change the pipes that are active? This mask
- * tracks which CRTC's have changed their active state at the end of
- * the transaction (not counting the temporary disable during modesets).
- * This mask should only be non-zero when intel_state->modeset is true,
- * but the converse is not necessarily true; simply changing a mode may
- * not flip the final active status of any CRTC's
- */
- u8 active_pipe_changes;
-
u8 active_pipes;
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -506,9 +496,6 @@ struct intel_atomic_state {
*/
bool global_state_changed;
- /* Number of enabled DBuf slices */
- u8 enabled_dbuf_slices_mask;
-
struct i915_sw_fence commit_ready;
struct llist_node freed;
@@ -643,8 +630,7 @@ struct intel_crtc_scaler_state {
int scaler_id;
};
-/* drm_mode->private_flags */
-#define I915_MODE_FLAG_INHERITED (1<<0)
+/* {crtc,crtc_state}->mode_flags */
/* Flag to get scanline using frame time stamps */
#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
/* Flag to use the scanline counter instead of the pixel counter */
@@ -841,6 +827,7 @@ struct intel_crtc_state {
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fifo_changed; /* FIFO split is changed */
bool preload_luts;
+ bool inherited; /* state inherited from BIOS? */
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@@ -956,6 +943,9 @@ struct intel_crtc_state {
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
+ /* I915_MODE_FLAG_* */
+ u8 mode_flags;
+
u8 lane_count;
/*
@@ -1080,6 +1070,9 @@ struct intel_crtc_state {
/* Only valid on TGL+ */
enum transcoder mst_master_transcoder;
+
+ /* For DSB related info */
+ struct intel_dsb *dsb;
};
enum intel_pipe_crc_source {
@@ -1118,6 +1111,10 @@ struct intel_crtc {
*/
bool active;
u8 plane_ids_mask;
+
+ /* I915_MODE_FLAG_* */
+ u8 mode_flags;
+
unsigned long long enabled_power_domains;
struct intel_overlay *overlay;
@@ -1149,9 +1146,6 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
- /* per pipe DSB related info */
- struct intel_dsb dsb;
-
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc;
#endif
@@ -1373,6 +1367,9 @@ struct intel_dp {
void (*set_idle_link_train)(struct intel_dp *intel_dp);
void (*set_signal_levels)(struct intel_dp *intel_dp);
+ u8 (*preemph_max)(struct intel_dp *intel_dp);
+ u8 (*voltage_max)(struct intel_dp *intel_dp);
+
/* Displayport compliance testing */
struct intel_dp_compliance compliance;
@@ -1437,9 +1434,9 @@ struct intel_dp_mst_encoder {
};
static inline enum dpio_channel
-vlv_dport_to_channel(struct intel_digital_port *dport)
+vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
{
- switch (dport->base.port) {
+ switch (dig_port->base.port) {
case PORT_B:
case PORT_D:
return DPIO_CH0;
@@ -1451,9 +1448,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
}
static inline enum dpio_phy
-vlv_dport_to_phy(struct intel_digital_port *dport)
+vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
{
- switch (dport->base.port) {
+ switch (dig_port->base.port) {
case PORT_B:
case PORT_C:
return DPIO_PHY0;
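
The new preemph_max/voltage_max hooks added to struct intel_dp above let each platform supply its maximum vswing/pre-emphasis level directly, replacing the platform if/else ladder removed later in the intel_dp.c hunks. A simplified sketch of that callback-table pattern (stand-in types and values, not the driver's real structures):

	#include <stdio.h>

	/* Stand-ins for the driver's DP_TRAIN_VOLTAGE_SWING_LEVEL_* values. */
	enum { VSWING_LEVEL_2 = 2, VSWING_LEVEL_3 = 3 };

	struct dp {
		/* per-platform hook, chosen once when the encoder is initialized */
		int (*voltage_max)(struct dp *dp);
	};

	static int voltage_max_2(struct dp *dp) { return VSWING_LEVEL_2; }
	static int voltage_max_3(struct dp *dp) { return VSWING_LEVEL_3; }

	static int dp_voltage_max(struct dp *dp)
	{
		/* callers no longer need a platform if/else ladder */
		return dp->voltage_max(dp);
	}

	int main(void)
	{
		struct dp ivb_edp = { .voltage_max = voltage_max_2 };
		struct dp chv     = { .voltage_max = voltage_max_3 };

		printf("%d %d\n", dp_voltage_max(&ivb_edp), dp_voltage_max(&chv));
		return 0;
	}
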
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ed9e53c373a7..d6295eb20b63 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -140,9 +140,9 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4};
*/
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+ return dig_port->base.type == INTEL_OUTPUT_EDP;
}
static void intel_dp_link_down(struct intel_encoder *encoder,
@@ -216,10 +216,10 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- int source_max = intel_dig_port->max_lanes;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ int source_max = dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
- int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
+ int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
return min3(source_max, sink_max, fia_max);
}
@@ -251,8 +251,8 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_dotclk = dev_priv->max_dotclk_freq;
int ds_max_dotclk;
@@ -409,7 +409,10 @@ static int intel_dp_rate_index(const int *rates, int len, int rate)
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
- WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_WARN_ON(&i915->drm,
+ !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
intel_dp->num_source_rates,
@@ -418,7 +421,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp->common_rates);
/* Paranoia, there should always be something in common. */
- if (WARN_ON(intel_dp->num_common_rates == 0)) {
+ if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
@@ -465,6 +468,15 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int index;
+ /*
+ * TODO: Enable fallback on MST links once MST link compute can handle
+ * the fallback params.
+ */
+ if (intel_dp->is_mst) {
+ drm_err(&i915->drm, "Link Training Unsuccessful\n");
+ return -1;
+ }
+
index = intel_dp_rate_index(intel_dp->common_rates,
intel_dp->num_common_rates,
link_rate);
@@ -766,7 +778,7 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -776,14 +788,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (drm_WARN(&dev_priv->drm,
intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name))
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name))
return;
drm_dbg_kms(&dev_priv->drm,
"kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -879,7 +891,7 @@ static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -908,8 +920,8 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm,
"picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(intel_dp->pps_pipe),
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -997,8 +1009,8 @@ static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->base.port;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1019,15 +1031,15 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
if (intel_dp->pps_pipe == INVALID_PIPE) {
drm_dbg_kms(&dev_priv->drm,
"no initial power sequencer for [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
return;
}
drm_dbg_kms(&dev_priv->drm,
"initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -1292,9 +1304,9 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
- to_i915(intel_dig_port->base.base.dev);
+ to_i915(dig_port->base.base.dev);
u32 precharge, timeout;
if (IS_GEN(dev_priv, 6))
@@ -1322,10 +1334,10 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 unused)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *i915 =
- to_i915(intel_dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+ to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
u32 ret;
ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1339,7 +1351,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
if (intel_phy_is_tc(i915, phy) &&
- intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
+ dig_port->tc_mode == TC_PORT_TBT_ALT)
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
@@ -1351,11 +1363,11 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u8 *recv, int recv_size,
u32 aux_send_ctl_flags)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *i915 =
- to_i915(intel_dig_port->base.base.dev);
+ to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
- enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
@@ -1372,9 +1384,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
if (is_tc_port)
- intel_tc_port_lock(intel_dig_port);
+ intel_tc_port_lock(dig_port);
- aux_domain = intel_aux_power_domain(intel_dig_port);
+ aux_domain = intel_aux_power_domain(dig_port);
aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
@@ -1533,7 +1545,7 @@ out:
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
if (is_tc_port)
- intel_tc_port_unlock(intel_dig_port);
+ intel_tc_port_unlock(dig_port);
return ret;
}
@@ -1555,6 +1567,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
int ret;
@@ -1568,10 +1581,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 2; /* 0 or 1 data bytes */
- if (WARN_ON(txsize > 20))
+ if (drm_WARN_ON(&i915->drm, txsize > 20))
return -E2BIG;
- WARN_ON(!msg->buffer != !msg->size);
+ drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
@@ -1596,7 +1609,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
rxsize = msg->size + 1;
- if (WARN_ON(rxsize > 20))
+ if (drm_WARN_ON(&i915->drm, rxsize > 20))
return -E2BIG;
ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
@@ -1871,10 +1884,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int len;
len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
- if (WARN_ON(len <= 0))
+ if (drm_WARN_ON(&i915->drm, len <= 0))
return 162000;
return intel_dp->common_rates[len - 1];
@@ -1882,10 +1896,11 @@ intel_dp_max_link_rate(struct intel_dp *intel_dp)
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i = intel_dp_rate_index(intel_dp->sink_rates,
intel_dp->num_sink_rates, rate);
- if (WARN_ON(i < 0))
+ if (drm_WARN_ON(&i915->drm, i < 0))
i = 0;
return i;
@@ -2876,7 +2891,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -2893,11 +2908,11 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
return need_to_disable;
intel_display_power_get(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ intel_aux_power_domain(dig_port));
drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2919,8 +2934,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (!edp_have_panel_power(intel_dp)) {
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] panel power wasn't enabled\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2953,7 +2968,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -2966,8 +2981,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
return;
drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2987,7 +3002,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
intel_dp->panel_power_off_time = ktime_get_boottime();
intel_display_power_put_unchecked(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ intel_aux_power_domain(dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
@@ -3818,8 +3833,8 @@ static void g4x_pre_enable_dp(struct intel_atomic_state *state,
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
@@ -3841,8 +3856,8 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
*/
drm_dbg_kms(&dev_priv->drm,
"detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
intel_de_write(dev_priv, pp_on_reg, 0);
intel_de_posting_read(dev_priv, pp_on_reg);
@@ -3984,70 +3999,24 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATU
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
-/* These are source-specific values. */
-u8
-intel_dp_voltage_max(struct intel_dp *intel_dp)
+static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- enum port port = encoder->port;
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+}
- if (HAS_DDI(dev_priv))
- return intel_ddi_dp_voltage_max(encoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
-u8
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
+static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- enum port port = encoder->port;
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+}
- if (HAS_DDI(dev_priv)) {
- return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- } else {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- return DP_TRAIN_PRE_EMPH_LEVEL_1;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- default:
- return DP_TRAIN_PRE_EMPH_LEVEL_0;
- }
- }
+static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
@@ -4330,6 +4299,7 @@ static u32 ivb_cpu_edp_signal_levels(u8 train_set)
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return EDP_LINK_TRAIN_400MV_6DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -4746,7 +4716,9 @@ intel_dp_sink_can_mst(struct intel_dp *intel_dp)
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
- return i915_modparams.enable_dp_mst &&
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return i915->params.enable_dp_mst &&
intel_dp->can_mst &&
intel_dp_sink_can_mst(intel_dp);
}
@@ -4763,13 +4735,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
encoder->base.base.id, encoder->base.name,
yesno(intel_dp->can_mst), yesno(sink_can_mst),
- yesno(i915_modparams.enable_dp_mst));
+ yesno(i915->params.enable_dp_mst));
if (!intel_dp->can_mst)
return;
intel_dp->is_mst = sink_can_mst &&
- i915_modparams.enable_dp_mst;
+ i915->params.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -4951,7 +4923,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct dp_sdp sdp = {};
ssize_t len;
@@ -4977,14 +4949,14 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, len < 0))
return;
- intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
+ dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct dp_sdp sdp = {};
ssize_t len;
@@ -4994,7 +4966,7 @@ void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, len < 0))
return;
- intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
+ dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
&sdp, len);
}
@@ -5154,7 +5126,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_VSC;
@@ -5169,7 +5141,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
@@ -5181,7 +5153,7 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
struct intel_crtc_state *crtc_state,
struct hdmi_drm_infoframe *drm_infoframe)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
struct dp_sdp sdp = {};
@@ -5191,8 +5163,8 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
- sizeof(sdp));
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+ sizeof(sdp));
ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
sizeof(sdp));
@@ -5394,10 +5366,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_dp_phy_test_params *data =
&intel_dp->compliance.test_data.phytest;
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
u32 pattern_val;
@@ -5459,10 +5431,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
@@ -5485,11 +5457,11 @@ intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->base.port;
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum port port = dig_port->base.port;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
@@ -5595,35 +5567,46 @@ update_status:
"Could not write test response to sink\n");
}
-static int
+/**
+ * intel_dp_check_mst_status - service any pending MST interrupts, check link status
+ * @intel_dp: Intel DP struct
+ *
+ * Read any pending MST interrupts, call MST core to handle these and ack the
+ * interrupts. Check if the main and AUX link state is ok.
+ *
+ * Returns:
+ * - %true if pending interrupts were serviced (or no interrupts were
+ * pending) w/o detecting an error condition.
+ * - %false if an error condition - like AUX failure or a loss of link - is
+ * detected, which needs servicing from the hotplug work.
+ */
+static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- bool need_retrain = false;
-
- if (!intel_dp->is_mst)
- return -EINVAL;
+ bool link_ok = true;
- WARN_ON_ONCE(intel_dp->active_mst_links < 0);
+ drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
for (;;) {
u8 esi[DP_DPRX_ESI_LEN] = {};
- bool bret, handled;
+ bool handled;
int retry;
- bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
- if (!bret) {
+ if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
drm_dbg_kms(&i915->drm,
"failed to get ESI - device may have failed\n");
- return -EINVAL;
+ link_ok = false;
+
+ break;
}
/* check link status - esi[10] = 0x200c */
- if (intel_dp->active_mst_links > 0 && !need_retrain &&
+ if (intel_dp->active_mst_links > 0 && link_ok &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
drm_dbg_kms(&i915->drm,
"channel EQ not ok, retraining\n");
- need_retrain = true;
+ link_ok = false;
}
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
@@ -5643,7 +5626,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
}
}
- return need_retrain;
+ return link_ok;
}
static bool
@@ -5966,7 +5949,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
u8 *dpcd = intel_dp->dpcd;
u8 type;
- if (WARN_ON(intel_dp_is_edp(intel_dp)))
+ if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
if (lspcon->active)
@@ -6191,7 +6174,17 @@ intel_dp_detect(struct drm_connector *connector,
goto out;
}
- if (intel_dp->reset_link_params) {
+ /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
+ intel_dp_configure_mst(intel_dp);
+
+ /*
+ * TODO: Reset link params when switching to MST mode, until MST
+ * supports link training fallback params.
+ */
+ if (intel_dp->reset_link_params || intel_dp->is_mst) {
/* Initial max link lane count */
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
@@ -6203,12 +6196,6 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_print_rates(intel_dp);
- /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
- if (INTEL_GEN(dev_priv) >= 11)
- intel_dp_get_dsc_sink_cap(intel_dp);
-
- intel_dp_configure_mst(intel_dp);
-
if (intel_dp->is_mst) {
/*
* If we are in MST mode then this connector
@@ -6345,10 +6332,10 @@ intel_dp_connector_unregister(struct drm_connector *connector)
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+ struct intel_dp *intel_dp = &dig_port->dp;
- intel_dp_mst_encoder_cleanup(intel_dig_port);
+ intel_dp_mst_encoder_cleanup(dig_port);
if (intel_dp_is_edp(intel_dp)) {
intel_wakeref_t wakeref;
@@ -6407,11 +6394,11 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
}
static
-int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
u8 *an)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base));
static const struct drm_dp_aux_msg msg = {
.request = DP_AUX_NATIVE_WRITE,
.address = DP_AUX_HDCP_AKSV,
@@ -6422,7 +6409,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
int ret;
/* Output An first, that's easy */
- dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
+ dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
drm_dbg_kms(&i915->drm,
@@ -6461,13 +6448,13 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
return 0;
}
-static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
drm_dbg_kms(&i915->drm,
@@ -6477,10 +6464,10 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
return 0;
}
-static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
u8 *bstatus)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
/*
@@ -6488,7 +6475,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
* definition by different names. In the HDMI spec, it's called BSTATUS,
* but in DP it's called BINFO.
*/
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
drm_dbg_kms(&i915->drm,
@@ -6499,13 +6486,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
u8 *bcaps)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
drm_dbg_kms(&i915->drm,
@@ -6517,13 +6504,13 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
if (ret)
return ret;
@@ -6532,13 +6519,13 @@ int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
u8 *ri_prime)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
@@ -6549,14 +6536,14 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
bool *ksv_ready)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
drm_dbg_kms(&i915->drm,
@@ -6568,17 +6555,17 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
int i;
/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
for (i = 0; i < num_downstream; i += 3) {
size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_AUX_HDCP_KSV_FIFO,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
@@ -6593,16 +6580,16 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
int i, u32 *part)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
return -EINVAL;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
@@ -6614,7 +6601,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
bool enable)
{
/* Not used for single stream DisplayPort setups */
@@ -6622,13 +6609,13 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
}
static
-bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
drm_dbg_kms(&i915->drm,
@@ -6640,13 +6627,13 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
}
static
-int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
bool *hdcp_capable)
{
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
if (ret)
return ret;
@@ -6704,13 +6691,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
};
static int
-intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
u8 *rx_status)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
HDCP_2_2_DP_RXSTATUS_LEN);
if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
@@ -6723,14 +6710,14 @@ intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
}
static
-int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
u8 msg_id, bool *msg_ready)
{
u8 rx_status;
int ret;
*msg_ready = false;
- ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
if (ret < 0)
return ret;
@@ -6756,11 +6743,11 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
}
static ssize_t
-intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_dp *dp = &intel_dig_port->dp;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *dp = &dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
int ret, timeout;
@@ -6784,7 +6771,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
* the timeout at wait for CP_IRQ.
*/
intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
- ret = hdcp2_detect_msg_availability(intel_dig_port,
+ ret = hdcp2_detect_msg_availability(dig_port,
msg_id, &msg_ready);
if (!msg_ready)
ret = -ETIMEDOUT;
@@ -6810,10 +6797,10 @@ static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
}
static
-int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
void *buf, size_t size)
{
- struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_dp *dp = &dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
@@ -6836,7 +6823,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
- ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_write(&dig_port->dp.aux,
offset, (void *)byte, len);
if (ret < 0)
return ret;
@@ -6850,13 +6837,13 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
}
static
-ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
{
u8 rx_info[HDCP_2_2_RXINFO_LEN];
u32 dev_cnt;
ssize_t ret;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_HDCP_2_2_REG_RXINFO_OFFSET,
(void *)rx_info, HDCP_2_2_RXINFO_LEN);
if (ret != HDCP_2_2_RXINFO_LEN)
@@ -6876,10 +6863,10 @@ ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
}
static
-int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
u8 msg_id, void *buf, size_t size)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -6890,12 +6877,12 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
return -EINVAL;
offset = hdcp2_msg_data->offset;
- ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+ ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
if (ret < 0)
return ret;
if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
- ret = get_receiver_id_list_size(intel_dig_port);
+ ret = get_receiver_id_list_size(dig_port);
if (ret < 0)
return ret;
@@ -6910,7 +6897,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
(void *)byte, len);
if (ret < 0) {
drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
@@ -6929,7 +6916,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
bool is_repeater, u8 content_type)
{
int ret;
@@ -6948,7 +6935,7 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
stream_type_msg.stream_type = content_type;
- ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
sizeof(stream_type_msg));
return ret < 0 ? ret : 0;
@@ -6956,12 +6943,12 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
}
static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
{
u8 rx_status;
int ret;
- ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
if (ret)
return ret;
@@ -6976,14 +6963,14 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
}
static
-int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
bool *capable)
{
u8 rx_caps[3];
int ret;
*capable = false;
- ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
rx_caps, HDCP_2_2_RXCAPS_LEN);
if (ret != HDCP_2_2_RXCAPS_LEN)
@@ -7262,12 +7249,12 @@ static bool intel_edp_have_power(struct intel_dp *intel_dp)
}
enum irqreturn
-intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
- if (intel_dig_port->base.type == INTEL_OUTPUT_EDP &&
+ if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd || !intel_edp_have_power(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
@@ -7278,14 +7265,14 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
drm_dbg_kms(&i915->drm,
"ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
long_hpd ? "long" : "short",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
return IRQ_HANDLED;
}
drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
long_hpd ? "long" : "short");
if (long_hpd) {
@@ -7294,35 +7281,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (intel_dp->is_mst) {
- switch (intel_dp_check_mst_status(intel_dp)) {
- case -EINVAL:
- /*
- * If we were in MST mode, and device is not
- * there, get out of MST mode
- */
- drm_dbg_kms(&i915->drm,
- "MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst,
- intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
-
- return IRQ_NONE;
- case 1:
- return IRQ_NONE;
- default:
- break;
- }
- }
-
- if (!intel_dp->is_mst) {
- bool handled;
-
- handled = intel_dp_short_pulse(intel_dp);
-
- if (!handled)
+ if (!intel_dp_check_mst_status(intel_dp))
return IRQ_NONE;
+ } else if (!intel_dp_short_pulse(intel_dp)) {
+ return IRQ_NONE;
}
return IRQ_HANDLED;
@@ -7694,7 +7656,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
+ if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
refresh_rate)
index = DRRS_LOW_RR;
@@ -7807,7 +7769,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp,
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv, old_crtc_state,
- intel_dp->attached_connector->panel.fixed_mode->vrefresh);
+ drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
@@ -7840,7 +7802,7 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+ drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
}
unlock:
@@ -7860,6 +7822,7 @@ unlock:
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits)
{
+ struct intel_dp *intel_dp;
struct drm_crtc *crtc;
enum pipe pipe;
@@ -7869,12 +7832,14 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
cancel_delayed_work(&dev_priv->drrs.work);
mutex_lock(&dev_priv->drrs.mutex);
- if (!dev_priv->drrs.dp) {
+
+ intel_dp = dev_priv->drrs.dp;
+ if (!intel_dp) {
mutex_unlock(&dev_priv->drrs.mutex);
return;
}
- crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -7883,7 +7848,7 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
/* invalidate means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
+ drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
mutex_unlock(&dev_priv->drrs.mutex);
}
@@ -7903,6 +7868,7 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits)
{
+ struct intel_dp *intel_dp;
struct drm_crtc *crtc;
enum pipe pipe;
@@ -7912,12 +7878,14 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
cancel_delayed_work(&dev_priv->drrs.work);
mutex_lock(&dev_priv->drrs.mutex);
- if (!dev_priv->drrs.dp) {
+
+ intel_dp = dev_priv->drrs.dp;
+ if (!intel_dp) {
mutex_unlock(&dev_priv->drrs.mutex);
return;
}
- crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -7926,7 +7894,7 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
/* flush means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
+ drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
/*
* flush also means no more activity hence schedule downclock, if all
@@ -8167,12 +8135,12 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
}
bool
-intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
@@ -8183,9 +8151,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
INIT_WORK(&intel_connector->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
- if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+ if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
return false;
@@ -8256,12 +8224,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- intel_dp_mst_encoder_init(intel_dig_port,
+ intel_dp_mst_encoder_init(dig_port,
intel_connector->base.base.id);
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
- intel_dp_mst_encoder_cleanup(intel_dig_port);
+ intel_dp_mst_encoder_cleanup(dig_port);
goto fail;
}
@@ -8296,20 +8264,20 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg,
enum port port)
{
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
- intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
- if (!intel_dig_port)
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
- intel_encoder = &intel_dig_port->base;
+ intel_encoder = &dig_port->base;
encoder = &intel_encoder->base;
if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -8345,25 +8313,34 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
- intel_dig_port->dp.set_link_train = cpt_set_link_train;
+ dig_port->dp.set_link_train = cpt_set_link_train;
else
- intel_dig_port->dp.set_link_train = g4x_set_link_train;
+ dig_port->dp.set_link_train = g4x_set_link_train;
if (IS_CHERRYVIEW(dev_priv))
- intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
+ dig_port->dp.set_signal_levels = chv_set_signal_levels;
else if (IS_VALLEYVIEW(dev_priv))
- intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+ dig_port->dp.set_signal_levels = vlv_set_signal_levels;
else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+ dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
else if (IS_GEN(dev_priv, 6) && port == PORT_A)
- intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+ dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
else
- intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+ dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
+ (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ dig_port->dp.preemph_max = intel_dp_preemph_max_3;
+ dig_port->dp.voltage_max = intel_dp_voltage_max_3;
+ } else {
+ dig_port->dp.preemph_max = intel_dp_preemph_max_2;
+ dig_port->dp.voltage_max = intel_dp_voltage_max_2;
+ }
- intel_dig_port->dp.output_reg = output_reg;
- intel_dig_port->max_lanes = 4;
- intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
- intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+ dig_port->dp.output_reg = output_reg;
+ dig_port->max_lanes = 4;
+ dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+ dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -8378,25 +8355,25 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->cloneable = 0;
intel_encoder->port = port;
- intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (HAS_GMCH(dev_priv)) {
if (IS_GM45(dev_priv))
- intel_dig_port->connected = gm45_digital_port_connected;
+ dig_port->connected = gm45_digital_port_connected;
else
- intel_dig_port->connected = g4x_digital_port_connected;
+ dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
- intel_dig_port->connected = ilk_digital_port_connected;
+ dig_port->connected = ilk_digital_port_connected;
else
- intel_dig_port->connected = ibx_digital_port_connected;
+ dig_port->connected = ibx_digital_port_connected;
}
if (port != PORT_A)
- intel_infoframe_init(intel_dig_port);
+ intel_infoframe_init(dig_port);
- intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- if (!intel_dp_init_connector(intel_dig_port, intel_connector))
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ if (!intel_dp_init_connector(dig_port, intel_connector))
goto err_init_connector;
return true;
@@ -8406,7 +8383,7 @@ err_init_connector:
err_encoder_init:
kfree(intel_connector);
err_connector_alloc:
- kfree(intel_dig_port);
+ kfree(dig_port);
return false;
}
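For context, a minimal sketch (the bodies are outside this diff, so treat this as an assumption) of what the per-platform hooks assigned above presumably look like: trivial helpers returning the platform's maximum DPCD swing/pre-emphasis level, matching the drm_WARN_ON_ONCE() checks added in intel_dp_link_training.c further below.

/* Illustrative sketch only; the exact helper bodies are not shown in this hunk. */
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}

static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}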
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 1702959ca079..b901ab850cbd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -40,7 +40,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
enum port port);
-bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+bool intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, u8 lane_count,
@@ -61,7 +61,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -92,10 +92,6 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
-u8
-intel_dp_voltage_max(struct intel_dp *intel_dp);
-u8
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 0722540d64ad..acbd7eb66cbe 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -348,7 +348,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- if (i915_modparams.enable_dpcd_backlight == 0 ||
+ if (i915->params.enable_dpcd_backlight == 0 ||
!intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
@@ -358,7 +358,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
*/
if (i915->vbt.backlight.type !=
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
- i915_modparams.enable_dpcd_backlight != 1 &&
+ i915->params.enable_dpcd_backlight != 1 &&
!drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
drm_info(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index e4f1843170b7..a23ed7290843 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,9 +34,25 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
+static u8 dp_voltage_max(u8 preemph)
+{
+ switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ default:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ }
+}
+
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 v = 0;
u8 p = 0;
int lane;
@@ -44,23 +60,28 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
u8 preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
- u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
-
- if (this_v > v)
- v = this_v;
- if (this_p > p)
- p = this_p;
+ v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
+ p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
}
- voltage_max = intel_dp_voltage_max(intel_dp);
- if (v >= voltage_max)
- v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+ preemph_max = intel_dp->preemph_max(intel_dp);
+ drm_WARN_ON_ONCE(&i915->drm,
+ preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
+ preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
- preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ v = min(v, dp_voltage_max(p));
+
+ voltage_max = intel_dp->voltage_max(intel_dp);
+ drm_WARN_ON_ONCE(&i915->drm,
+ voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
+ voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
+
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
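Aside (illustration, not part of the patch): dp_voltage_max() above effectively caps the combination so that swing level plus pre-emphasis level never exceeds 3, and intel_dp_get_adjust_train() now clamps the pre-emphasis to the platform limit first and the voltage swing second. A self-contained sketch of that ordering, using bare level numbers rather than the DPCD bitfield encoding and ignoring the *_MAX_*_REACHED flags:

#include <assert.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/*
 * Clamp pre-emphasis to the platform max first, then clamp the swing both
 * by the pre-emphasis-derived limit (3 - p) and by the platform max.
 */
static void clamp_levels(unsigned int *v, unsigned int *p,
			 unsigned int voltage_max, unsigned int preemph_max)
{
	*p = min_u(*p, preemph_max);
	*v = min_u(min_u(*v, 3 - *p), voltage_max);
}

int main(void)
{
	unsigned int v = 3, p = 2;	/* sink requests swing 3, pre-emph 2 */

	clamp_levels(&v, &p, 3, 3);	/* platform allows up to level 3/3 */
	assert(v == 1 && p == 2);	/* pre-emph 2 caps the swing at 1 */
	return 0;
}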
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index f29e51ce489c..a2d91a499700 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -33,6 +33,7 @@
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
+#include "intel_hotplug.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -316,14 +317,33 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
return ret;
}
+static void clear_act_sent(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ intel_de_write(i915, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_ACT_SENT);
+}
+
+static void wait_for_act_sent(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (intel_de_wait_for_set(i915, intel_dp->regs.dp_tp_status,
+ DP_TP_STATUS_ACT_SENT, 1))
+ drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
+
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+}
+
static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
@@ -349,8 +369,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -369,6 +389,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ clear_act_sent(intel_dp);
+
val = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
@@ -376,11 +398,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
val);
- if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
- DP_TP_STATUS_ACT_SENT, 1))
- drm_err(&dev_priv->drm,
- "Timed out waiting for ACT sent when disabling\n");
- drm_dp_check_act_status(&intel_dp->mst_mgr);
+ wait_for_act_sent(intel_dp);
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
@@ -403,7 +421,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* the transcoder clock select is set to none.
*/
if (last_mst_stream)
- intel_dp_set_infoframes(&intel_dig_port->base, false,
+ intel_dp_set_infoframes(&dig_port->base, false,
old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
@@ -418,7 +436,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_mst->connector = NULL;
if (last_mst_stream)
- intel_dig_port->base.post_disable(state, &intel_dig_port->base,
+ dig_port->base.post_disable(state, &dig_port->base,
old_crtc_state, NULL);
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
@@ -431,11 +449,11 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
if (intel_dp->active_mst_links == 0)
- intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
+ dig_port->base.pre_pll_enable(state, &dig_port->base,
pipe_config, NULL);
}
@@ -445,13 +463,12 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
- u32 temp;
bool first_mst_stream;
/* MST encoders are bound to a crtc, not to a connector,
@@ -473,7 +490,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
if (first_mst_stream)
- intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
+ dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -484,14 +501,12 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
- intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
/*
* Before Gen 12 this is not done as part of
- * intel_dig_port->base.pre_enable() and should be done here. For
+ * dig_port->base.pre_enable() and should be done here. For
* Gen 12+ the step in which this should be done is different for the
* first MST stream, so it's done on the DDI for the first stream and
* here for the following ones.
@@ -510,22 +525,28 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+ clear_act_sent(intel_dp);
+
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+ val |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder),
+ val);
+
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
intel_dp->active_mst_links);
- if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
- DP_TP_STATUS_ACT_SENT, 1))
- drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");
-
- drm_dp_check_act_status(&intel_dp->mst_mgr);
+ wait_for_act_sent(intel_dp);
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
@@ -551,9 +572,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_digital_port *dig_port = intel_mst->primary;
- intel_ddi_get_config(&intel_dig_port->base, pipe_config);
+ intel_ddi_get_config(&dig_port->base, pipe_config);
}
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
@@ -618,39 +639,60 @@ static int intel_dp_mst_get_modes(struct drm_connector *connector)
return intel_dp_mst_get_ddc_modes(connector);
}
-static enum drm_mode_status
-intel_dp_mst_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static int
+intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+ struct drm_dp_mst_port *port = intel_connector->port;
+ const int min_bpp = 18;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
+ int ret;
- if (drm_connector_is_unregistered(connector))
- return MODE_ERROR;
+ if (drm_connector_is_unregistered(connector)) {
+ *status = MODE_ERROR;
+ return 0;
+ }
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ *status = MODE_NO_DBLESCAN;
+ return 0;
+ }
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
- mode_rate = intel_dp_link_required(mode->clock, 18);
+ mode_rate = intel_dp_link_required(mode->clock, min_bpp);
- /* TODO - validate mode against available PBN for link */
- if (mode->clock < 10000)
- return MODE_CLOCK_LOW;
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ return ret;
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- return MODE_H_ILLEGAL;
+ if (mode_rate > max_rate || mode->clock > max_dotclk ||
+ drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
+ *status = MODE_CLOCK_HIGH;
+ return 0;
+ }
- if (mode_rate > max_rate || mode->clock > max_dotclk)
- return MODE_CLOCK_HIGH;
+ if (mode->clock < 10000) {
+ *status = MODE_CLOCK_LOW;
+ return 0;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ *status = MODE_H_ILLEGAL;
+ return 0;
+ }
- return intel_mode_valid_max_plane_size(dev_priv, mode);
+ *status = intel_mode_valid_max_plane_size(dev_priv, mode);
+ return 0;
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@@ -679,7 +721,7 @@ intel_dp_mst_detect(struct drm_connector *connector,
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
- .mode_valid = intel_dp_mst_mode_valid,
+ .mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
.detect_ctx = intel_dp_mst_detect,
@@ -711,8 +753,8 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
@@ -773,16 +815,25 @@ err:
return NULL;
}
+static void
+intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+
+ intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
+}
+
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
+ .poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
};
static struct intel_dp_mst_encoder *
-intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
{
struct intel_dp_mst_encoder *intel_mst;
struct intel_encoder *intel_encoder;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = dig_port->base.base.dev;
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
@@ -791,14 +842,14 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_mst->pipe = pipe;
intel_encoder = &intel_mst->base;
- intel_mst->primary = intel_dig_port;
+ intel_mst->primary = dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
- intel_encoder->power_domain = intel_dig_port->base.power_domain;
- intel_encoder->port = intel_dig_port->base.port;
+ intel_encoder->power_domain = dig_port->base.power_domain;
+ intel_encoder->port = dig_port->base.port;
intel_encoder->cloneable = 0;
/*
* This is wrong, but broken userspace uses the intersection
@@ -825,29 +876,29 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
}
static bool
-intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
{
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe)
- intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
+ intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
return true;
}
int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
{
- return intel_dig_port->dp.active_mst_links;
+ return dig_port->dp.active_mst_links;
}
int
-intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- enum port port = intel_dig_port->base.port;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ enum port port = dig_port->base.port;
int ret;
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
@@ -862,7 +913,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
- intel_dp_create_fake_mst_encoders(intel_dig_port);
+ intel_dp_create_fake_mst_encoders(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3, conn_base_id);
if (ret)
@@ -874,9 +925,9 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
}
void
-intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_dp *intel_dp = &dig_port->dp;
if (!intel_dp->can_mst)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 854724f68f09..6afda4e86b3c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -11,9 +11,9 @@
struct intel_digital_port;
struct intel_crtc_state;
-int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
-void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
+int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 399a7edb4568..7910522273b2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -650,9 +650,9 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
bool uniq_trans_scale)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = intel_crtc->pipe;
u32 val;
int i;
@@ -746,7 +746,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -789,10 +789,10 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -803,7 +803,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* Otherwise we can't even access the PLL.
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
- dport->release_cl2_override =
+ dig_port->release_cl2_override =
!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
chv_phy_powergate_lanes(encoder, true, lane_mask);
@@ -870,10 +870,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
int data, i, stagger;
u32 val;
@@ -948,12 +948,12 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (dport->release_cl2_override) {
+ if (dig_port->release_cl2_override) {
chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
- dport->release_cl2_override = false;
+ dig_port->release_cl2_override = false;
}
}
@@ -997,8 +997,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
- enum dpio_channel port = vlv_dport_to_channel(dport);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = intel_crtc->pipe;
vlv_dpio_get(dev_priv);
@@ -1022,10 +1022,10 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
/* Program Tx lane resets to default */
@@ -1052,10 +1052,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -1081,10 +1081,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
vlv_dpio_get(dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index b45185b80bec..aeb6ee395cce 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -2934,6 +2934,15 @@ static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
.dco_integer = 0x43, .dco_fraction = 0x4000,
/* the following params are unused */
+};
+
+/*
+ * Display WA #22010492432: tgl
+ * Divide the nominal .dco_fraction value by 2.
+ */
+static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = {
+ .dco_integer = 0x54, .dco_fraction = 0x1800,
+ /* the following params are unused */
.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
@@ -2970,12 +2979,14 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
- case 38400:
*pll_params = tgl_tbt_pll_19_2MHz_values;
break;
case 24000:
*pll_params = tgl_tbt_pll_24MHz_values;
break;
+ case 38400:
+ *pll_params = tgl_tbt_pll_38_4MHz_values;
+ break;
}
} else {
switch (dev_priv->dpll.ref_clks.nssc) {
@@ -3038,49 +3049,26 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
icl_wrpll_ref_clock(i915));
}
-static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder,
+static void icl_calc_dpll_state(struct drm_i915_private *i915,
+ const struct skl_wrpll_params *pll_params,
struct intel_dpll_hw_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 cfgcr0, cfgcr1;
- struct skl_wrpll_params pll_params = { 0 };
- bool ret;
-
- if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
- encoder->port)))
- ret = icl_calc_tbt_pll(crtc_state, &pll_params);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = icl_calc_wrpll(crtc_state, &pll_params);
- else
- ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
-
- if (!ret)
- return false;
+ memset(pll_state, 0, sizeof(*pll_state));
- cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
- pll_params.dco_integer;
+ pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) |
+ pll_params->dco_integer;
- cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
- DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
- DPLL_CFGCR1_KDIV(pll_params.kdiv) |
- DPLL_CFGCR1_PDIV(pll_params.pdiv);
+ pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
+ DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
+ DPLL_CFGCR1_KDIV(pll_params->kdiv) |
+ DPLL_CFGCR1_PDIV(pll_params->pdiv);
- if (INTEL_GEN(dev_priv) >= 12)
- cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
+ if (INTEL_GEN(i915) >= 12)
+ pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
- cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
-
- memset(pll_state, 0, sizeof(*pll_state));
-
- pll_state->cfgcr0 = cfgcr0;
- pll_state->cfgcr1 = cfgcr1;
-
- return true;
+ pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
-
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1;
@@ -3493,19 +3481,29 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_wrpll_params pll_params = { };
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum port port = encoder->port;
unsigned long dpll_mask;
+ int ret;
- if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ ret = icl_calc_wrpll(crtc_state, &pll_params);
+ else
+ ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
+
+ if (!ret) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate combo PHY PLL state.\n");
return false;
}
+ icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+
if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
@@ -3539,16 +3537,19 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_wrpll_params pll_params = { };
struct icl_port_dpll *port_dpll;
enum intel_dpll_id dpll_id;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+ if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate TBT PLL state.\n");
return false;
}
+ icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
BIT(DPLL_ID_ICL_TBTPLL));
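
For orientation, a minimal sketch of the calling convention after this split (all names except the wrapper itself are taken from the hunks above; the wrapper is purely illustrative): the output-specific parameter calculation happens first, and icl_calc_dpll_state() is now a pure packing step that needs only the device and the computed skl_wrpll_params.

/* Illustrative only: mirrors the combo PHY path introduced above. */
static bool example_calc_combo_pll(struct drm_i915_private *i915,
				   struct intel_crtc_state *crtc_state,
				   struct intel_dpll_hw_state *hw_state)
{
	struct skl_wrpll_params pll_params = { };
	int ret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);	/* DCO from dotclock */
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); /* fixed DP tables */

	if (!ret)
		return false;

	/* Pure packing step: fills cfgcr0/cfgcr1 from the computed params. */
	icl_calc_dpll_state(i915, &pll_params, hw_state);
	return true;
}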
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 29fec6a92d17..566fa72427b3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -34,152 +34,52 @@
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
-static bool is_dsb_busy(struct intel_dsb *dsb)
+static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
+ enum dsb_id id)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ return DSB_STATUS & intel_de_read(i915, DSB_CTRL(pipe, id));
}
-static bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_enable_engine(struct drm_i915_private *i915,
+ enum pipe pipe, enum dsb_id id)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
+ drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl |= DSB_ENABLE;
- intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
- intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(i915, DSB_CTRL(pipe, id));
return true;
}
-static bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+static bool intel_dsb_disable_engine(struct drm_i915_private *i915,
+ enum pipe pipe, enum dsb_id id)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
+ drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl &= ~DSB_ENABLE;
- intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
- intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(i915, DSB_CTRL(pipe, id));
return true;
}
/**
- * intel_dsb_get() - Allocate DSB context and return a DSB instance.
- * @crtc: intel_crtc structure to get pipe info.
- *
- * This function provides handle of a DSB instance, for the further DSB
- * operations.
- *
- * Returns: address of Intel_dsb instance requested for.
- * Failure: Returns the same DSB instance, but without a command buffer.
- */
-
-struct intel_dsb *
-intel_dsb_get(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
- struct intel_dsb *dsb = &crtc->dsb;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 *buf;
- intel_wakeref_t wakeref;
-
- if (!HAS_DSB(i915))
- return dsb;
-
- if (dsb->refcount++ != 0)
- return dsb;
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
- if (IS_ERR(obj)) {
- drm_err(&i915->drm, "Gem object creation failed\n");
- goto out;
- }
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma)) {
- drm_err(&i915->drm, "Vma creation failed\n");
- i915_gem_object_put(obj);
- goto out;
- }
-
- buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
- if (IS_ERR(buf)) {
- drm_err(&i915->drm, "Command buffer creation failed\n");
- goto out;
- }
-
- dsb->id = DSB1;
- dsb->vma = vma;
- dsb->cmd_buf = buf;
-
-out:
- /*
- * On error dsb->cmd_buf will continue to be NULL, making the writes
- * pass-through. Leave the dangling ref to be removed later by the
- * corresponding intel_dsb_put(): the important error message will
- * already be logged above.
- */
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
- return dsb;
-}
-
-/**
- * intel_dsb_put() - To destroy DSB context.
- * @dsb: intel_dsb structure.
- *
- * This function destroys the DSB context allocated by a dsb_get(), by
- * unpinning and releasing the VMA object associated with it.
- */
-
-void intel_dsb_put(struct intel_dsb *dsb)
-{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (!HAS_DSB(i915))
- return;
-
- if (drm_WARN_ON(&i915->drm, dsb->refcount == 0))
- return;
-
- if (--dsb->refcount == 0) {
- i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
- dsb->cmd_buf = NULL;
- dsb->free_pos = 0;
- dsb->ins_start_offset = 0;
- }
-}
-
-/**
* intel_dsb_indexed_reg_write() -Write to the DSB context for auto
* increment register.
- * @dsb: intel_dsb structure.
+ * @crtc_state: intel_crtc_state structure
* @reg: register address.
* @val: value.
*
@@ -189,19 +89,20 @@ void intel_dsb_put(struct intel_dsb *dsb)
* is done through mmio write.
*/
-void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
- u32 val)
+void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct intel_dsb *dsb = crtc_state->dsb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 *buf = dsb->cmd_buf;
+ u32 *buf;
u32 reg_val;
- if (!buf) {
+ if (!dsb) {
intel_de_write(dev_priv, reg, val);
return;
}
-
+ buf = dsb->cmd_buf;
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
@@ -256,7 +157,7 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
/**
* intel_dsb_reg_write() -Write to the DSB context for normal
* register.
- * @dsb: intel_dsb structure.
+ * @crtc_state: intel_crtc_state structure
* @reg: register address.
* @val: value.
*
@@ -265,17 +166,21 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
* and rest all erroneous condition register programming is done
* through mmio write.
*/
-void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
+void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 *buf = dsb->cmd_buf;
+ struct intel_dsb *dsb;
+ u32 *buf;
- if (!buf) {
+ dsb = crtc_state->dsb;
+ if (!dsb) {
intel_de_write(dev_priv, reg, val);
return;
}
+ buf = dsb->cmd_buf;
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
@@ -290,26 +195,27 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
- * @dsb: intel_dsb structure.
+ * @crtc_state: intel_crtc_state structure
*
* This function is used to do actual write to hardware using DSB.
 * On errors, fall back to MMIO. This function also resets the context.
*/
-void intel_dsb_commit(struct intel_dsb *dsb)
+void intel_dsb_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct intel_dsb *dsb = crtc_state->dsb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
u32 tail;
- if (!dsb->free_pos)
+ if (!(dsb && dsb->free_pos))
return;
- if (!intel_dsb_enable_engine(dsb))
+ if (!intel_dsb_enable_engine(dev_priv, pipe, dsb->id))
goto reset;
- if (is_dsb_busy(dsb)) {
+ if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
drm_err(&dev_priv->drm,
"HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
@@ -322,7 +228,7 @@ void intel_dsb_commit(struct intel_dsb *dsb)
memset(&dsb->cmd_buf[dsb->free_pos], 0,
(tail - dsb->free_pos * 4));
- if (is_dsb_busy(dsb)) {
+ if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
drm_err(&dev_priv->drm,
"TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
@@ -332,7 +238,7 @@ void intel_dsb_commit(struct intel_dsb *dsb)
i915_ggtt_offset(dsb->vma), tail);
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
- if (wait_for(!is_dsb_busy(dsb), 1)) {
+ if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
drm_err(&dev_priv->drm,
"Timed out waiting for DSB workload completion.\n");
goto reset;
@@ -341,5 +247,83 @@ void intel_dsb_commit(struct intel_dsb *dsb)
reset:
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- intel_dsb_disable_engine(dsb);
+ intel_dsb_disable_engine(dev_priv, pipe, dsb->id);
+}
+
+/**
+ * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
+ * @crtc_state: intel_crtc_state structure whose associated DSB instance is to be prepared.
+ *
+ * This function prepares the command buffer, which is used to store the DSB
+ * instructions along with their data.
+ */
+void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *buf;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_DSB(i915))
+ return;
+
+ dsb = kmalloc(sizeof(*dsb), GFP_KERNEL);
+ if (!dsb) {
+ drm_err(&i915->drm, "DSB object creation failed\n");
+ return;
+ }
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Gem object creation failed\n");
+ kfree(dsb);
+ goto out;
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ drm_err(&i915->drm, "Vma creation failed\n");
+ i915_gem_object_put(obj);
+ kfree(dsb);
+ goto out;
+ }
+
+ buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
+ drm_err(&i915->drm, "Command buffer creation failed\n");
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ kfree(dsb);
+ goto out;
+ }
+
+ dsb->id = DSB1;
+ dsb->vma = vma;
+ dsb->cmd_buf = buf;
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ crtc_state->dsb = dsb;
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/**
+ * intel_dsb_cleanup() - Clean up the DSB context.
+ * @crtc_state: intel_crtc_state structure whose associated DSB instance is cleaned up.
+ *
+ * This function cleans up the DSB context by unpinning and releasing
+ * the VMA object associated with it.
+ */
+void intel_dsb_cleanup(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsb)
+ return;
+
+ i915_vma_unpin_and_release(&crtc_state->dsb->vma, I915_VMA_RELEASE_MAP);
+ kfree(crtc_state->dsb);
+ crtc_state->dsb = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 395ef9ce558e..654a11f24b80 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -10,7 +10,7 @@
#include "i915_reg.h"
-struct intel_crtc;
+struct intel_crtc_state;
struct i915_vma;
enum dsb_id {
@@ -22,7 +22,6 @@ enum dsb_id {
};
struct intel_dsb {
- long refcount;
enum dsb_id id;
u32 *cmd_buf;
struct i915_vma *vma;
@@ -41,12 +40,12 @@ struct intel_dsb {
u32 ins_start_offset;
};
-struct intel_dsb *
-intel_dsb_get(struct intel_crtc *crtc);
-void intel_dsb_put(struct intel_dsb *dsb);
-void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
-void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
- u32 val);
-void intel_dsb_commit(struct intel_dsb *dsb);
+void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
+void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
+void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val);
+void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val);
+void intel_dsb_commit(const struct intel_crtc_state *crtc_state);
#endif
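
Taken together with the intel_dsb.c hunks above, the header now describes a crtc_state-scoped lifetime: prepare once, queue register writes, commit, then clean up; when crtc_state->dsb is NULL the write helpers transparently fall back to MMIO. A minimal usage sketch, assuming a caller that programs an auto-increment register (the function below and its parameters are hypothetical; only the intel_dsb_*() calls come from this patch):

/* Illustrative caller, not part of the patch. */
static void example_write_lut(struct intel_crtc_state *crtc_state,
			      i915_reg_t data_reg, const u32 *lut, int entries)
{
	int i;

	/* Allocate, pin and map the command buffer (no-op without HAS_DSB). */
	intel_dsb_prepare(crtc_state);

	/* Queued into the DSB buffer, or written via MMIO when there is no DSB. */
	for (i = 0; i < entries; i++)
		intel_dsb_indexed_reg_write(crtc_state, data_reg, lut[i]);

	/* Start the engine, wait for completion, reset the context. */
	intel_dsb_commit(crtc_state);

	/* Unpin and release the VMA, free the intel_dsb, clear crtc_state->dsb. */
	intel_dsb_cleanup(crtc_state);
}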
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 5cd09034519b..307ed8ae9a19 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -324,6 +324,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
+ int num_modes;
/*
* We should probably have an i2c driver get_modes function for those
@@ -331,21 +332,22 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector,
- intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
- if (!list_empty(&connector->probed_modes))
- return 1;
+ num_modes = intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+ if (num_modes)
+ return num_modes;
if (fixed_mode) {
struct drm_display_mode *mode;
+
mode = drm_mode_duplicate(connector->dev, fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
- return 1;
+ num_modes++;
}
}
- return 0;
+ return num_modes;
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 412572f88b67..24c3a0f212c6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -132,14 +132,13 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
}
/* enable it... */
- fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
- fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl = FBC_CTL_INTERVAL(params->interval);
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff);
if (params->fence_id >= 0)
- fbc_ctl |= params->fence_id;
+ fbc_ctl |= FBC_CTL_FENCENO(params->fence_id);
intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
}
@@ -188,8 +187,30 @@ static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
}
+static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
/* This function forces a CFB recompression through the nuke operation. */
-static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
@@ -199,6 +220,16 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}
+static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 6)
+ snb_fbc_recompress(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 4)
+ i965_fbc_recompress(dev_priv);
+ else
+ i8xx_fbc_recompress(dev_priv);
+}
+
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
@@ -316,21 +347,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
- if (IS_IVYBRIDGE(dev_priv)) {
- /* WaFbcAsynchFlipDisableFbcQueue:ivb */
- intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
- intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
- intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
- }
-
- if (INTEL_GEN(dev_priv) >= 11)
- /* Wa_1409120013:icl,ehl,tgl */
- intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
- ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
@@ -461,7 +477,7 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
unsigned int size, unsigned int fb_cpp)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_mm_node *uninitialized_var(compressed_llb);
+ struct drm_mm_node *compressed_llb;
int ret;
drm_WARN_ON(&dev_priv->drm,
@@ -696,9 +712,16 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
cache->fb.format = fb->format;
- cache->fb.stride = fb->pitches[0];
cache->fb.modifier = fb->modifier;
+ /* FIXME is this correct? */
+ cache->fb.stride = plane_state->color_plane[0].stride;
+ if (drm_rotation_90_or_270(plane_state->hw.rotation))
+ cache->fb.stride *= fb->format->cpp[0];
+
+ /* FBC1 compression interval: arbitrary choice of 1 second */
+ cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
+
cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
@@ -747,7 +770,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
return false;
}
- if (!i915_modparams.enable_fbc) {
+ if (!dev_priv->params.enable_fbc) {
fbc->no_fbc_reason = "disabled per module param or by default";
return false;
}
@@ -814,6 +837,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
+ fbc->no_fbc_reason = "pixel format is invalid";
+ return false;
+ }
+
if (!rotation_is_valid(dev_priv, cache->fb.format->format,
cache->plane.rotation)) {
fbc->no_fbc_reason = "rotation unsupported";
@@ -830,11 +858,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
- fbc->no_fbc_reason = "pixel format is invalid";
- return false;
- }
-
if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
cache->fb.format->has_alpha) {
fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
@@ -892,6 +915,8 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->fence_id = cache->fence_id;
params->fence_y_offset = cache->fence_y_offset;
+ params->interval = cache->interval;
+
params->crtc.pipe = crtc->pipe;
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
@@ -1028,7 +1053,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
fbc->flip_pending = false;
- if (!i915_modparams.enable_fbc) {
+ if (!dev_priv->params.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
__intel_fbc_disable(dev_priv);
@@ -1101,11 +1126,19 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (!HAS_FBC(dev_priv))
return;
+ /*
+ * GTT tracking does not nuke the entire cfb,
+ * so don't clear busy_bits that were set for
+ * some other reason.
+ */
+ if (origin == ORIGIN_GTT)
+ return;
+
mutex_lock(&fbc->lock);
fbc->busy_bits &= ~frontbuffer_bits;
- if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP)
goto out;
if (!fbc->busy_bits && fbc->crtc &&
@@ -1377,8 +1410,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
*/
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
- if (i915_modparams.enable_fbc >= 0)
- return !!i915_modparams.enable_fbc;
+ if (dev_priv->params.enable_fbc >= 0)
+ return !!dev_priv->params.enable_fbc;
if (!HAS_FBC(dev_priv))
return 0;
@@ -1422,20 +1455,15 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
- i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+ dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
- i915_modparams.enable_fbc);
+ dev_priv->params.enable_fbc);
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
return;
}
- /* This value was pulled out of someone's hat */
- if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
- intel_de_write(dev_priv, FBC_CONTROL,
- 500 << FBC_CTL_INTERVAL_SHIFT);
-
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
* matches the hardware state. */
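
A short note on the new interval value above: FBC_CTL_INTERVAL() is fed the mode's vertical refresh rate, so with the interval counted in frames (as the "arbitrary choice of 1 second" comment implies) the hardware recompresses roughly once per second regardless of refresh rate. A hedged sketch of that arithmetic (the helper name is illustrative):

/* Illustrative only: 60 Hz -> FBC_CTL_INTERVAL(60), i.e. ~one pass per second. */
static int example_fbc1_interval(const struct drm_display_mode *adjusted_mode)
{
	/* frames per second == frames elapsed in one second */
	return drm_mode_vrefresh(adjusted_mode);
}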
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 2cbc4619b4ce..89a4d294822d 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -40,15 +40,15 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv)
}
static
-int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
for (i = 0; i < tries; i++) {
- ret = shim->read_bksv(intel_dig_port, bksv);
+ ret = shim->read_bksv(dig_port, bksv);
if (ret)
return ret;
if (intel_hdcp_is_ksv_valid(bksv))
@@ -65,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bool capable = false;
u8 bksv[5];
@@ -74,9 +74,9 @@ bool intel_hdcp_capable(struct intel_connector *connector)
return capable;
if (shim->hdcp_capable) {
- shim->hdcp_capable(intel_dig_port, &capable);
+ shim->hdcp_capable(dig_port, &capable);
} else {
- if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+ if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
capable = true;
}
@@ -86,7 +86,7 @@ bool intel_hdcp_capable(struct intel_connector *connector)
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
@@ -104,7 +104,7 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
mutex_unlock(&dev_priv->hdcp_comp_mutex);
/* Sink's capability for HDCP2.2 */
- hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+ hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
return capable;
}
@@ -125,14 +125,14 @@ static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
LINK_ENCRYPTION_STATUS;
}
-static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
int ret, read_ret;
bool ksv_ready;
/* Poll for ksv list ready (spec says max time allowed is 5s) */
- ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
+ ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
&ksv_ready),
read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
100 * 1000);
@@ -300,16 +300,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
/* Process V' values from the receiver */
for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
- ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
+ ret = shim->read_v_prime_part(dig_port, i, &vprime);
if (ret)
return ret;
intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
@@ -528,20 +528,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, tries = 3;
- ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+ ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"KSV list failed to become ready (%d)\n", ret);
return ret;
}
- ret = shim->read_bstatus(intel_dig_port, bstatus);
+ ret = shim->read_bstatus(dig_port, bstatus);
if (ret)
return ret;
@@ -571,12 +571,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
return -ENOMEM;
}
- ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+ ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
if (ret)
goto err;
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
- num_downstream)) {
+ num_downstream) > 0) {
drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
ret = -EPERM;
goto err;
@@ -611,12 +611,12 @@ err:
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
union {
@@ -640,7 +640,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
* displays, this is not necessary.
*/
if (shim->hdcp_capable) {
- ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
+ ret = shim->hdcp_capable(dig_port, &hdcp_capable);
if (ret)
return ret;
if (!hdcp_capable) {
@@ -670,7 +670,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_ANLO(dev_priv, cpu_transcoder, port));
an.reg[1] = intel_de_read(dev_priv,
HDCP_ANHI(dev_priv, cpu_transcoder, port));
- ret = shim->write_an_aksv(intel_dig_port, an.shim);
+ ret = shim->write_an_aksv(dig_port, an.shim);
if (ret)
return ret;
@@ -678,11 +678,11 @@ static int intel_hdcp_auth(struct intel_connector *connector)
memset(&bksv, 0, sizeof(bksv));
- ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+ ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
if (ret < 0)
return ret;
- if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
drm_err(&dev_priv->drm, "BKSV is revoked\n");
return -EPERM;
}
@@ -692,14 +692,14 @@ static int intel_hdcp_auth(struct intel_connector *connector)
intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
bksv.reg[1]);
- ret = shim->repeater_present(intel_dig_port, &repeater_present);
+ ret = shim->repeater_present(dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
intel_de_write(dev_priv, HDCP_REP_CTL,
intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
- ret = shim->toggle_signalling(intel_dig_port, true);
+ ret = shim->toggle_signalling(dig_port, true);
if (ret)
return ret;
@@ -732,7 +732,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
*/
for (i = 0; i < tries; i++) {
ri.reg = 0;
- ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+ ret = shim->read_ri_prime(dig_port, ri.shim);
if (ret)
return ret;
intel_de_write(dev_priv,
@@ -776,10 +776,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
static int _intel_hdcp_disable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
@@ -796,7 +796,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return -ETIMEDOUT;
}
- ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+ ret = hdcp->shim->toggle_signalling(dig_port, false);
if (ret) {
drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
return ret;
@@ -859,10 +859,10 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -888,7 +888,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (hdcp->shim->check_link(intel_dig_port)) {
+ if (hdcp->shim->check_link(dig_port)) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&hdcp->prop_work);
@@ -1242,7 +1242,7 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
@@ -1264,12 +1264,12 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+ ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
sizeof(msgs.ake_init));
if (ret < 0)
return ret;
- ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
&msgs.send_cert, sizeof(msgs.send_cert));
if (ret < 0)
return ret;
@@ -1283,7 +1283,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
msgs.send_cert.cert_rx.receiver_id,
- 1)) {
+ 1) > 0) {
drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
return -EPERM;
}
@@ -1298,11 +1298,11 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+ ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
if (ret < 0)
return ret;
- ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
&msgs.send_hprime, sizeof(msgs.send_hprime));
if (ret < 0)
return ret;
@@ -1313,7 +1313,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (!hdcp->is_paired) {
/* Pairing is required */
- ret = shim->read_2_2_msg(intel_dig_port,
+ ret = shim->read_2_2_msg(dig_port,
HDCP_2_2_AKE_SEND_PAIRING_INFO,
&msgs.pairing_info,
sizeof(msgs.pairing_info));
@@ -1331,7 +1331,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
static int hdcp2_locality_check(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_lc_init lc_init;
@@ -1345,12 +1345,12 @@ static int hdcp2_locality_check(struct intel_connector *connector)
if (ret < 0)
continue;
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+ ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
sizeof(msgs.lc_init));
if (ret < 0)
continue;
- ret = shim->read_2_2_msg(intel_dig_port,
+ ret = shim->read_2_2_msg(dig_port,
HDCP_2_2_LC_SEND_LPRIME,
&msgs.send_lprime,
sizeof(msgs.send_lprime));
@@ -1367,7 +1367,7 @@ static int hdcp2_locality_check(struct intel_connector *connector)
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp2_ske_send_eks send_eks;
int ret;
@@ -1376,7 +1376,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+ ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
sizeof(send_eks));
if (ret < 0)
return ret;
@@ -1387,7 +1387,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
@@ -1409,12 +1409,12 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
/* Send it to Repeater */
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+ ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
sizeof(msgs.stream_manage));
if (ret < 0)
return ret;
- ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
&msgs.stream_ready, sizeof(msgs.stream_ready));
if (ret < 0)
return ret;
@@ -1439,7 +1439,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
@@ -1451,7 +1451,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
u8 *rx_info;
int ret;
- ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
&msgs.recvid_list, sizeof(msgs.recvid_list));
if (ret < 0)
return ret;
@@ -1484,7 +1484,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
msgs.recvid_list.receiver_ids,
- device_cnt)) {
+ device_cnt) > 0) {
drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
return -EPERM;
}
@@ -1496,7 +1496,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
return ret;
hdcp->seq_num_v = seq_num_v;
- ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+ ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
sizeof(msgs.rep_ack));
if (ret < 0)
return ret;
@@ -1517,7 +1517,7 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
@@ -1543,7 +1543,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
}
if (shim->config_stream_type) {
- ret = shim->config_stream_type(intel_dig_port,
+ ret = shim->config_stream_type(dig_port,
hdcp->is_repeater,
hdcp->content_type);
if (ret < 0)
@@ -1569,10 +1569,10 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
@@ -1580,7 +1580,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
- ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+ ret = hdcp->shim->toggle_signalling(dig_port, true);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to enable HDCP signalling. %d\n",
@@ -1608,10 +1608,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
@@ -1630,7 +1630,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
if (hdcp->shim->toggle_signalling) {
- ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+ ret = hdcp->shim->toggle_signalling(dig_port, false);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to disable HDCP signalling. %d\n",
@@ -1723,10 +1723,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = intel_dig_port->base.port;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -1751,7 +1751,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- ret = hdcp->shim->check_2_2_link(intel_dig_port);
+ ret = hdcp->shim->check_2_2_link(dig_port);
if (ret == HDCP_LINK_PROTECTED) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
@@ -1923,8 +1923,11 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
- return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
+ return (INTEL_GEN(dev_priv) >= 10 ||
+ IS_GEMINILAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv));
}
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
@@ -2083,6 +2086,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
(conn_state->hdcp_content_type != hdcp->content_type &&
conn_state->content_protection !=
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+ bool desired_and_not_enabled = false;
/*
* During the HDCP encryption session if Type change is requested,
@@ -2105,8 +2109,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
}
if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED ||
- content_protection_type_changed)
+ DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ mutex_lock(&hdcp->mutex);
+ /* Avoid enabling HDCP if it is already ENABLED */
+ desired_and_not_enabled =
+ hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (desired_and_not_enabled || content_protection_type_changed)
intel_hdcp_enable(connector,
crtc_state->cpu_transcoder,
(u8)conn_state->hdcp_content_type);
@@ -2155,6 +2166,19 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
return;
}
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+ /*
+ * Fix the HDCP uapi content protection state in case of modeset.
+ * FIXME: As per HDCP content protection property uapi doc, an uevent()
+ * need to be sent if there is transition from ENABLED->DESIRED.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+ (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
+ new_state->content_protection =
+ DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
/*
* Nothing to do if the state didn't change, or HDCP was activated since
* the last commit. And also no change in hdcp content type.
@@ -2167,8 +2191,6 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
return;
}
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
crtc_state->mode_changed = true;
}
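
On the revocation checks above: the switch to "> 0" comparisons suggests drm_hdcp_check_ksvs_revoked() returns the number of revoked KSVs it found, with a negative errno on failure. A minimal sketch of the resulting policy, mirroring the hunks (the helper itself is illustrative):

/* Illustrative only: mirrors the "> 0" policy used in the hunks above. */
static bool example_bksv_revoked(struct drm_i915_private *i915, u8 *bksv)
{
	/*
	 * A positive count means at least one KSV is on the revocation list;
	 * a negative errno (e.g. no SRM loaded) does not block authentication.
	 */
	return drm_hdcp_check_ksvs_revoked(&i915->drm, bksv, 1) > 0;
}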
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 95b6d9457910..de2ce5632b94 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -88,10 +88,10 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
container_of(&encoder->base, struct intel_digital_port,
base.base);
- return &intel_dig_port->hdmi;
+ return &dig_port->hdmi;
}
static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
@@ -660,7 +660,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
const union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -681,7 +681,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
buffer[3] = 0;
len++;
- intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
+ dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
}
void intel_read_infoframe(struct intel_encoder *encoder,
@@ -689,7 +689,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
enum hdmi_infoframe_type type,
union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u8 buffer[VIDEO_DIP_DATA_SIZE];
int ret;
@@ -697,7 +697,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(type)) == 0)
return;
- intel_dig_port->read_infoframe(encoder, crtc_state,
+ dig_port->read_infoframe(encoder, crtc_state,
type, buffer, sizeof(buffer));
/* Fill the 'hole' (see big comment above) at position 3 */
@@ -872,8 +872,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1057,8 +1057,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
@@ -1275,11 +1275,11 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
adapter, enable);
}
-static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port,
unsigned int offset, void *buffer, size_t size)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
@@ -1304,11 +1304,11 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
return ret >= 0 ? -EIO : ret;
}
-static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port,
unsigned int offset, void *buffer, size_t size)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
@@ -1338,16 +1338,16 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
u8 *an)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
- ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
+ ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
@@ -1363,13 +1363,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
return 0;
}
-static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port,
u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
@@ -1378,13 +1378,13 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port,
u8 *bstatus)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
@@ -1393,14 +1393,14 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
u8 val;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
ret);
@@ -1411,13 +1411,13 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
u8 *ri_prime)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
@@ -1426,14 +1426,14 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
bool *ksv_ready)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
u8 val;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
ret);
@@ -1444,12 +1444,12 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
drm_dbg_kms(&i915->drm,
@@ -1460,16 +1460,16 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
int i, u32 *part)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
return -EINVAL;
- ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
@@ -1480,7 +1480,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_crtc *crtc = connector->base.state->crtc;
struct intel_crtc *intel_crtc = container_of(crtc,
struct intel_crtc, base);
@@ -1494,13 +1494,13 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
usleep_range(25, 50);
}
- ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false);
if (ret) {
drm_err(&dev_priv->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
- ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true);
if (ret) {
drm_err(&dev_priv->drm,
"Enable HDCP signalling failed (%d)\n", ret);
@@ -1511,10 +1511,10 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
}
static
-int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
bool enable)
{
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
@@ -1522,7 +1522,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
if (!enable)
usleep_range(6, 60); /* Bspec says >= 6us */
- ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable);
if (ret) {
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
@@ -1540,12 +1540,12 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
}
static
-bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_connector *connector =
- intel_dig_port->hdmi.attached_connector;
- enum port port = intel_dig_port->base.port;
+ dig_port->hdmi.attached_connector;
+ enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
union {
@@ -1553,7 +1553,7 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
u8 shim[DRM_HDCP_RI_LEN];
} ri;
- ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim);
+ ret = intel_hdmi_hdcp_read_ri_prime(dig_port, ri.shim);
if (ret)
return false;
@@ -1563,8 +1563,7 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
- drm_err(&i915->drm,
- "Ri' mismatch detected, link check failed (%x)\n",
+ drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n",
intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
port)));
return false;
@@ -1572,6 +1571,20 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
return true;
}
+static
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int retry;
+
+ for (retry = 0; retry < 3; retry++)
+ if (intel_hdmi_hdcp_check_link_once(dig_port))
+ return true;
+
+ drm_err(&i915->drm, "Link check failed\n");
+ return false;
+}
+
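The wrapper above only reports an error once three consecutive single-shot checks have failed. A minimal standalone sketch of that retry pattern, with check_link_once() as a hypothetical stand-in for intel_hdmi_hdcp_check_link_once():

/*
 * Sketch only: tolerate a transient Ri' mismatch by re-running the
 * single-shot check up to three times before reporting failure.
 */
#include <stdbool.h>
#include <stdio.h>

static bool check_link_once(int attempt)
{
	/* pretend the first two reads race with the sink updating Ri' */
	return attempt >= 2;
}

static bool check_link(void)
{
	int retry;

	for (retry = 0; retry < 3; retry++)
		if (check_link_once(retry))
			return true;

	fprintf(stderr, "Link check failed\n");
	return false;
}

int main(void)
{
	printf("link %s\n", check_link() ? "ok" : "broken");
	return 0;
}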
struct hdcp2_hdmi_msg_timeout {
u8 msg_id;
u16 timeout;
@@ -1586,10 +1599,10 @@ static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
};
static
-int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
u8 *rx_status)
{
- return intel_hdmi_hdcp_read(intel_dig_port,
+ return intel_hdmi_hdcp_read(dig_port,
HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
rx_status,
HDCP_2_2_HDMI_RXSTATUS_LEN);
@@ -1615,15 +1628,15 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
}
static int
-hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
- ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+ ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
if (ret < 0) {
drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
ret);
@@ -1643,10 +1656,10 @@ hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
}
static ssize_t
-intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
u8 msg_id, bool paired)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1655,7 +1668,7 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
if (timeout < 0)
return timeout;
- ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
+ ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port,
msg_id, &msg_ready,
&msg_sz),
!ret && msg_ready && msg_sz, timeout * 1000,
@@ -1668,26 +1681,26 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *dig_port,
void *buf, size_t size)
{
unsigned int offset;
offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
- return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
+ return intel_hdmi_hdcp_write(dig_port, offset, buf, size);
}
static
-int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
u8 msg_id, void *buf, size_t size)
{
- struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
- struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
ssize_t ret;
- ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
+ ret = intel_hdmi_hdcp2_wait_for_msg(dig_port, msg_id,
hdcp->is_paired);
if (ret < 0)
return ret;
@@ -1704,7 +1717,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
}
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
- ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
+ ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret);
if (ret)
drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
msg_id, ret);
@@ -1713,12 +1726,12 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
}
static
-int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
{
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
- ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+ ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
if (ret)
return ret;
@@ -1735,14 +1748,14 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
}
static
-int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+int intel_hdmi_hdcp2_capable(struct intel_digital_port *dig_port,
bool *capable)
{
u8 hdcp2_version;
int ret;
*capable = false;
- ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+ ret = intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
&hdcp2_version, sizeof(hdcp2_version));
if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
*capable = true;
@@ -2050,7 +2063,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
@@ -2094,7 +2107,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_dig_port->set_infoframes(encoder,
+ dig_port->set_infoframes(encoder,
false,
old_crtc_state, old_conn_state);
@@ -2229,8 +2242,11 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (!has_hdmi_sink)
+ return MODE_CLOCK_LOW;
clock *= 2;
+ }
if (drm_mode_is_420_only(&connector->display_info, mode))
clock /= 2;
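The hunk above doubles the dotclock for double-clocked modes (rejecting them outright without an HDMI sink) and halves it for 4:2:0-only modes. A minimal sketch of that effective-clock computation; the helper and the sample clocks are illustrative, not driver code:

#include <stdbool.h>
#include <stdio.h>

static int effective_clock(int clock_khz, bool dblclk, bool ycbcr420_only)
{
	if (dblclk)
		clock_khz *= 2;	/* double-clocked modes carry each pixel twice */
	if (ycbcr420_only)
		clock_khz /= 2;	/* 4:2:0 halves the TMDS character rate */
	return clock_khz;
}

int main(void)
{
	printf("%d kHz\n", effective_clock(148500, true, false));	/* 297000 */
	printf("%d kHz\n", effective_clock(594000, false, true));	/* 297000 */
	return 0;
}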
@@ -2415,8 +2431,8 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
return 0;
}
-static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2709,12 +2725,12 @@ static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
enc_to_dig_port(encoder);
intel_hdmi_prepare(encoder, pipe_config);
- intel_dig_port->set_infoframes(encoder,
+ dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
}
@@ -2724,7 +2740,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -2733,13 +2749,13 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
- dport->set_infoframes(encoder,
+ dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
- vlv_wait_port_ready(dev_priv, dport, 0x0);
+ vlv_wait_port_ready(dev_priv, dig_port, 0x0);
}
static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
@@ -2800,7 +2816,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2810,13 +2826,13 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
- dport->set_infoframes(encoder,
+ dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
- vlv_wait_port_ready(dev_priv, dport, 0x0);
+ vlv_wait_port_ready(dev_priv, dig_port, 0x0);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -2904,7 +2920,7 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_digital_port *intel_dig_port =
+ struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
@@ -2916,7 +2932,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
* ToDo: This needs to be extended for LSPCON implementation
* as well. Will be implemented separately.
*/
- if (!intel_dig_port->lspcon.active)
+ if (!dig_port->lspcon.active)
intel_attach_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
@@ -3076,6 +3092,24 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
+static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ WARN_ON(port == PORT_C);
+
+ /*
+ * Pin mapping for RKL depends on which PCH is present. With TGP, the
+ * final two outputs use type-c pins, even though they're actually
+ * combo outputs. With CMP, the traditional DDI A-D pins are used for
+ * all outputs.
+ */
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
+
+ return GMBUS_PIN_1_BXT + phy;
+}
+
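The mapping above switches between type-C and combo GMBUS pins depending on the PCH. A standalone sketch of the same selection; the pin values and enum are simplified placeholders, only the shape of the mapping mirrors the driver logic:

#include <stdbool.h>
#include <stdio.h>

enum phy { PHY_A, PHY_B, PHY_C, PHY_D };

#define GMBUS_PIN_1_BXT		1	/* placeholder values */
#define GMBUS_PIN_9_TC1_ICP	9

static int rkl_ddc_pin(bool has_tgp_pch, enum phy phy)
{
	if (has_tgp_pch && phy >= PHY_C)
		return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;

	return GMBUS_PIN_1_BXT + phy;
}

int main(void)
{
	printf("TGP, PHY D -> pin %d\n", rkl_ddc_pin(true, PHY_D));	/* 10 */
	printf("CMP, PHY D -> pin %d\n", rkl_ddc_pin(false, PHY_D));	/* 4 */
	return 0;
}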
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -3113,7 +3147,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}
- if (HAS_PCH_MCC(dev_priv))
+ if (IS_ROCKETLAKE(dev_priv))
+ ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+ else if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
@@ -3133,52 +3169,52 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
+void intel_infoframe_init(struct intel_digital_port *dig_port)
{
struct drm_i915_private *dev_priv =
- to_i915(intel_dig_port->base.base.dev);
+ to_i915(dig_port->base.base.dev);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_dig_port->write_infoframe = vlv_write_infoframe;
- intel_dig_port->read_infoframe = vlv_read_infoframe;
- intel_dig_port->set_infoframes = vlv_set_infoframes;
- intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
+ dig_port->write_infoframe = vlv_write_infoframe;
+ dig_port->read_infoframe = vlv_read_infoframe;
+ dig_port->set_infoframes = vlv_set_infoframes;
+ dig_port->infoframes_enabled = vlv_infoframes_enabled;
} else if (IS_G4X(dev_priv)) {
- intel_dig_port->write_infoframe = g4x_write_infoframe;
- intel_dig_port->read_infoframe = g4x_read_infoframe;
- intel_dig_port->set_infoframes = g4x_set_infoframes;
- intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
+ dig_port->write_infoframe = g4x_write_infoframe;
+ dig_port->read_infoframe = g4x_read_infoframe;
+ dig_port->set_infoframes = g4x_set_infoframes;
+ dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
- if (intel_dig_port->lspcon.active) {
- intel_dig_port->write_infoframe = lspcon_write_infoframe;
- intel_dig_port->read_infoframe = lspcon_read_infoframe;
- intel_dig_port->set_infoframes = lspcon_set_infoframes;
- intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
+ if (dig_port->lspcon.active) {
+ dig_port->write_infoframe = lspcon_write_infoframe;
+ dig_port->read_infoframe = lspcon_read_infoframe;
+ dig_port->set_infoframes = lspcon_set_infoframes;
+ dig_port->infoframes_enabled = lspcon_infoframes_enabled;
} else {
- intel_dig_port->write_infoframe = hsw_write_infoframe;
- intel_dig_port->read_infoframe = hsw_read_infoframe;
- intel_dig_port->set_infoframes = hsw_set_infoframes;
- intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
+ dig_port->write_infoframe = hsw_write_infoframe;
+ dig_port->read_infoframe = hsw_read_infoframe;
+ dig_port->set_infoframes = hsw_set_infoframes;
+ dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
} else if (HAS_PCH_IBX(dev_priv)) {
- intel_dig_port->write_infoframe = ibx_write_infoframe;
- intel_dig_port->read_infoframe = ibx_read_infoframe;
- intel_dig_port->set_infoframes = ibx_set_infoframes;
- intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
+ dig_port->write_infoframe = ibx_write_infoframe;
+ dig_port->read_infoframe = ibx_read_infoframe;
+ dig_port->set_infoframes = ibx_set_infoframes;
+ dig_port->infoframes_enabled = ibx_infoframes_enabled;
} else {
- intel_dig_port->write_infoframe = cpt_write_infoframe;
- intel_dig_port->read_infoframe = cpt_read_infoframe;
- intel_dig_port->set_infoframes = cpt_set_infoframes;
- intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
+ dig_port->write_infoframe = cpt_write_infoframe;
+ dig_port->read_infoframe = cpt_read_infoframe;
+ dig_port->set_infoframes = cpt_set_infoframes;
+ dig_port->infoframes_enabled = cpt_infoframes_enabled;
}
}
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
- struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i2c_adapter *ddc;
@@ -3192,9 +3228,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
- if (drm_WARN(dev, intel_dig_port->max_lanes < 4,
+ if (drm_WARN(dev, dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
return;
@@ -3283,21 +3319,21 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
void intel_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
- struct intel_digital_port *intel_dig_port;
+ struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
- if (!intel_dig_port)
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
return;
intel_connector = intel_connector_alloc();
if (!intel_connector) {
- kfree(intel_dig_port);
+ kfree(dig_port);
return;
}
- intel_encoder = &intel_dig_port->base;
+ intel_encoder = &dig_port->base;
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
@@ -3354,12 +3390,12 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
- intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
- intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
- intel_dig_port->max_lanes = 4;
+ dig_port->hdmi.hdmi_reg = hdmi_reg;
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->max_lanes = 4;
- intel_infoframe_init(intel_dig_port);
+ intel_infoframe_init(dig_port);
- intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- intel_hdmi_init_connector(intel_dig_port, intel_connector);
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ intel_hdmi_init_connector(dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 8ff1f76a63df..5b348dcab77a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -25,7 +25,7 @@ enum port;
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -36,7 +36,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool high_tmds_clock_ratio,
bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
+void intel_infoframe_init(struct intel_digital_port *dig_port);
u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
u32 intel_hdmi_infoframe_enable(unsigned int type);
@@ -46,5 +46,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
union hdmi_infoframe *frame);
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 4f6f560e093e..3f1d7b804a66 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -89,6 +89,15 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
{
enum phy phy = intel_port_to_phy(dev_priv, port);
+ /*
+ * RKL + TGP PCH is a special case; we effectively choose the hpd_pin
+ * based on the DDI rather than the PHY (i.e., the last two outputs
+ * should be HPD_PORT_{D,E} rather than {C,D}). Note that this differs
+ * should be HPD_PORT_{D,E} rather than {C,D}). Note that this differs
+ * from the behavior of both TGL+TGP and RKL+CMP.
+ */
+ if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
+ return HPD_PORT_A + port - PORT_A;
+
switch (phy) {
case PHY_F:
return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
@@ -274,24 +283,30 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
+ u64 old_epoch_counter;
+ bool ret = false;
drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->base.status;
+ old_epoch_counter = connector->base.epoch_counter;
connector->base.status =
drm_helper_probe_detect(&connector->base, NULL, false);
- if (old_status == connector->base.status)
- return INTEL_HOTPLUG_UNCHANGED;
-
- drm_dbg_kms(&to_i915(dev)->drm,
- "[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.base.id,
- connector->base.name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->base.status));
+ if (old_epoch_counter != connector->base.epoch_counter)
+ ret = true;
- return INTEL_HOTPLUG_CHANGED;
+ if (ret) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
+ connector->base.base.id,
+ connector->base.name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->base.status),
+ old_epoch_counter,
+ connector->base.epoch_counter);
+ return INTEL_HOTPLUG_CHANGED;
+ }
+ return INTEL_HOTPLUG_UNCHANGED;
}
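With this change a hotplug is reported whenever the probe bumps the connector's epoch counter, even if the status string ends up identical. A toy sketch of that comparison; the connector struct and probe() are illustrative stand-ins:

#include <inttypes.h>
#include <stdio.h>

struct connector {
	const char *status;
	uint64_t epoch_counter;
};

static void probe(struct connector *c)
{
	/* e.g. same "connected" status, but a new EDID bumps the epoch */
	c->status = "connected";
	c->epoch_counter++;
}

int main(void)
{
	struct connector c = { "connected", 41 };
	uint64_t old_epoch = c.epoch_counter;

	probe(&c);

	if (old_epoch != c.epoch_counter)
		printf("hotplug changed (epoch %" PRIu64 "->%" PRIu64 ")\n",
		       old_epoch, c.epoch_counter);
	else
		printf("hotplug unchanged\n");
	return 0;
}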
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
@@ -347,6 +362,24 @@ static void i915_digport_work_func(struct work_struct *work)
}
}
+/**
+ * intel_hpd_trigger_irq - trigger an hpd irq event for a port
+ * @dig_port: digital port
+ *
+ * Trigger an HPD interrupt event for the given port, emulating a short pulse
+ * generated by the sink, and schedule the dig port work to handle it.
+ */
+void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
+ spin_unlock_irq(&i915->irq_lock);
+
+ queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
+}
+
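A toy model of the flow this helper sets up: record the port in the short-pulse mask, then let the deferred dig-port work consume it as if the sink had generated the pulse. Names, types and the missing locking are illustrative only, not driver code:

#include <stdio.h>

#define BIT(n) (1u << (n))

enum port { PORT_A, PORT_B, PORT_C };

static unsigned int short_port_mask;

static void trigger_irq(enum port port)
{
	short_port_mask |= BIT(port);	/* irq_lock held in the driver */
	/* queue_work(...) would then run dig_port_work() asynchronously */
}

static void dig_port_work(void)
{
	unsigned int mask = short_port_mask;

	short_port_mask = 0;
	for (enum port p = PORT_A; p <= PORT_C; p++)
		if (mask & BIT(p))
			printf("short pulse on port %c\n", 'A' + p);
}

int main(void)
{
	trigger_irq(PORT_B);
	dig_port_work();
	return 0;
}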
/*
* Handle hotplug events outside the interrupt handler proper.
*/
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 777b0743257e..a704d7c94d16 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -10,6 +10,7 @@
struct drm_i915_private;
struct intel_connector;
+struct intel_digital_port;
struct intel_encoder;
enum port;
@@ -18,6 +19,7 @@ enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
+void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 6ff7b226f0a1..b781bf469644 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -550,11 +550,11 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
}
-bool lspcon_init(struct intel_digital_port *intel_dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
{
- struct intel_dp *dp = &intel_dig_port->dp;
- struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_connector *connector = &dp->attached_connector->base;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index 37cfddf8a9c5..1cffe8a42a08 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -15,7 +15,7 @@ struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;
-bool lspcon_init(struct intel_digital_port *intel_dig_port);
+bool lspcon_init(struct intel_digital_port *dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 872f2a489339..1888611244db 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -784,8 +784,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
struct drm_i915_private *dev_priv = to_i915(dev);
/* use the module option value if specified */
- if (i915_modparams.lvds_channel_mode > 0)
- return i915_modparams.lvds_channel_mode == 2;
+ if (dev_priv->params.lvds_channel_mode > 0)
+ return dev_priv->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index cc6b00959586..de995362f428 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -801,7 +801,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
const struct firmware *fw = NULL;
- const char *name = i915_modparams.vbt_firmware;
+ const char *name = dev_priv->params.vbt_firmware;
int ret;
if (!name || !*name)
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 66711e62fa71..52b4f6193b4c 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -100,12 +100,15 @@
#define CLK_RGB24_MASK 0x0
#define CLK_RGB16_MASK 0x070307
#define CLK_RGB15_MASK 0x070707
-#define CLK_RGB8I_MASK 0xffffff
+#define RGB30_TO_COLORKEY(c) \
+ ((((c) & 0x3fc00000) >> 6) | (((c) & 0x000ff000) >> 4) | (((c) & 0x000003fc) >> 2))
#define RGB16_TO_COLORKEY(c) \
- (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+ ((((c) & 0xf800) << 8) | (((c) & 0x07e0) << 5) | (((c) & 0x001f) << 3))
#define RGB15_TO_COLORKEY(c) \
- (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+ ((((c) & 0x7c00) << 9) | (((c) & 0x03e0) << 6) | (((c) & 0x001f) << 3))
+#define RGB8I_TO_COLORKEY(c) \
+ ((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0))
/* overlay flip addr flag */
#define OFC_UPDATE 0x1
@@ -682,8 +685,8 @@ static void update_colorkey(struct intel_overlay *overlay,
switch (format) {
case DRM_FORMAT_C8:
- key = 0;
- flags |= CLK_RGB8I_MASK;
+ key = RGB8I_TO_COLORKEY(key);
+ flags |= CLK_RGB24_MASK;
break;
case DRM_FORMAT_XRGB1555:
key = RGB15_TO_COLORKEY(key);
@@ -693,6 +696,11 @@ static void update_colorkey(struct intel_overlay *overlay,
key = RGB16_TO_COLORKEY(key);
flags |= CLK_RGB16_MASK;
break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ key = RGB30_TO_COLORKEY(key);
+ flags |= CLK_RGB24_MASK;
+ break;
default:
flags |= CLK_RGB24_MASK;
break;
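A worked example of the colorkey expansion macros introduced above; the macro bodies are copied verbatim from this patch, the sample key values are arbitrary:

#include <stdio.h>

#define RGB30_TO_COLORKEY(c) \
	((((c) & 0x3fc00000) >> 6) | (((c) & 0x000ff000) >> 4) | (((c) & 0x000003fc) >> 2))
#define RGB16_TO_COLORKEY(c) \
	((((c) & 0xf800) << 8) | (((c) & 0x07e0) << 5) | (((c) & 0x001f) << 3))
#define RGB8I_TO_COLORKEY(c) \
	((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0))

int main(void)
{
	/* 16 bpp key: 5:6:5 components widened to 8:8:8 positions */
	printf("RGB16 0x%04x -> 0x%06lx\n", 0x7e0f,
	       (unsigned long)RGB16_TO_COLORKEY(0x7e0f));
	/* 30 bpp key: top 8 bits of each 10-bit component kept */
	printf("RGB30 0x%08x -> 0x%06lx\n", 0x12345678,
	       (unsigned long)RGB30_TO_COLORKEY(0x12345678));
	/* C8 key: the 8-bit index replicated into all three channels */
	printf("RGB8I 0x%02x -> 0x%06lx\n", 0x5a,
	       (unsigned long)RGB8I_TO_COLORKEY(0x5a));
	return 0;
}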
@@ -777,9 +785,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
if (!overlay->active) {
- u32 oconfig;
-
- oconfig = OCONF_CC_OUT_8BIT;
+ const struct intel_crtc_state *crtc_state =
+ overlay->crtc->config;
+ u32 oconfig = 0;
+
+ if (crtc_state->gamma_enable &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ oconfig |= OCONF_CC_OUT_8BIT;
+ if (crtc_state->gamma_enable)
+ oconfig |= OCONF_GAMMA2_ENABLE;
if (IS_GEN(dev_priv, 4))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 3c5056dbf607..aaed9eb3b56c 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -521,10 +521,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
- if (i915_modparams.invert_brightness < 0)
+ if (dev_priv->params.invert_brightness < 0)
return val;
- if (i915_modparams.invert_brightness > 0 ||
+ if (dev_priv->params.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val + panel->backlight.min;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b7a2c102648a..bf9e320c547d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -83,7 +83,7 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
{
switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- return i915_modparams.enable_psr;
+ return i915->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -426,6 +426,12 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 11)
val |= EDP_PSR_TP4_TIME_0US;
+ if (dev_priv->params.psr_safest_params) {
+ val |= EDP_PSR_TP1_TIME_2500us;
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
+ goto check_tp3_sel;
+ }
+
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
@@ -444,6 +450,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
+check_tp3_sel:
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
@@ -495,18 +502,13 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
-static void hsw_activate_psr2(struct intel_dp *intel_dp)
+static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 val;
-
- val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
-
- val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- val |= EDP_Y_COORDINATE_ENABLE;
+ u32 val = 0;
- val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+ if (dev_priv->params.psr_safest_params)
+ return EDP_PSR2_TP2_TIME_2500us;
if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
@@ -518,6 +520,39 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
else
val |= EDP_PSR2_TP2_TIME_2500us;
+ return val;
+}
+
+static void hsw_activate_psr2(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val;
+
+ val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
+
+ val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ val |= EDP_Y_COORDINATE_ENABLE;
+
+ val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+ val |= intel_psr2_get_tp_time(intel_dp);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /*
+ * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
+ * values from BSpec. To set optimal power consumption, modes with
+ * resolution lower than 4K need to decrease IO_BUFFER_WAKE and
+ * FAST_WAKE, and modes with resolution higher than 4K need to
+ * increase IO_BUFFER_WAKE and FAST_WAKE.
+ */
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
+ val |= TGL_EDP_PSR2_FAST_WAKE(7);
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ val |= EDP_PSR2_IO_BUFFER_WAKE(7);
+ val |= EDP_PSR2_FAST_WAKE(7);
+ }
+
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
@@ -657,6 +692,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
if (INTEL_GEN(dev_priv) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
@@ -671,14 +712,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
max_bpp = 24;
}
- if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
- crtc_hdisplay, crtc_vdisplay,
- psr_max_h, psr_max_v);
- return false;
- }
-
if (crtc_state->pipe_bpp > max_bpp) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, pipe bpp %d > max supported %d\n",
@@ -699,9 +732,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (crtc_state->crc_enabled) {
+ /*
+ * Some platforms lack PSR2 HW tracking and instead require manual
+ * tracking by software. In this case, the driver is required to track
+ * the areas that need updates and program hardware to send selective
+ * updates.
+ *
+ * So until the software tracking is implemented, PSR2 needs to be
+ * disabled for platforms without PSR2 HW tracking.
+ */
+ if (!HAS_PSR_HW_TRACKING(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ "No PSR2 HW tracking in the platform\n");
+ return false;
+ }
+
+ if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ crtc_hdisplay, crtc_vdisplay,
+ psr_max_h, psr_max_v);
return false;
}
@@ -855,8 +905,8 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
u32 val;
drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
@@ -1450,9 +1500,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
- if (i915_modparams.enable_psr == -1)
+ if (dev_priv->params.enable_psr == -1)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
- i915_modparams.enable_psr = 0;
+ dev_priv->params.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index bc6c26818e15..2da4388e1540 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -94,6 +94,8 @@ struct intel_sdvo {
*/
struct intel_sdvo_caps caps;
+ u8 colorimetry_cap;
+
/* Pixel clock limitations reported by the SDVO device, in kHz */
int pixel_clock_min, pixel_clock_max;
@@ -411,6 +413,7 @@ static const char *sdvo_cmd_name(u8 cmd)
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
const char *cmd_name;
int i, pos = 0;
char buffer[64];
@@ -431,7 +434,7 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
else
BUF_PRINT("(%02X)", cmd);
- WARN_ON(pos >= sizeof(buffer) - 1);
+ drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
@@ -533,6 +536,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
const char *cmd_status;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
@@ -597,7 +601,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- WARN_ON(pos >= sizeof(buffer) - 1);
+ drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
@@ -940,6 +944,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
+static bool intel_sdvo_set_pixel_replication(struct intel_sdvo *intel_sdvo,
+ u8 pixel_repeat)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_PIXEL_REPLI,
+ &pixel_repeat, 1);
+}
+
static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
u8 audio_state)
{
@@ -1081,6 +1092,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -1106,7 +1118,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
HDMI_QUANTIZATION_RANGE_FULL);
ret = hdmi_avi_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(&dev_priv->drm, ret))
return false;
return true;
@@ -1115,6 +1127,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
ssize_t len;
@@ -1123,11 +1136,12 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
return true;
- if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
+ if (drm_WARN_ON(&dev_priv->drm,
+ frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
return false;
len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
- if (WARN_ON(len < 0))
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
@@ -1237,6 +1251,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(pipe_config->uapi.crtc->dev);
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
@@ -1257,7 +1272,8 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
clock->m1 = 12;
clock->m2 = 8;
} else {
- WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
+ drm_WARN(&dev_priv->drm, 1,
+ "SDVO TV clock out of range: %i\n", dotclock);
}
pipe_config->clock_set = true;
@@ -1270,6 +1286,18 @@ static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
}
+static bool intel_sdvo_limited_color_range(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+ if ((intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) == 0)
+ return false;
+
+ return intel_hdmi_limited_color_range(crtc_state, conn_state);
+}
+
static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1335,21 +1363,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
}
- if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /*
- * See CEA-861-E - 5.1 Default Encoding Parameters
- *
- * FIXME: This bit is only valid when using TMDS encoding and 8
- * bit per color mode.
- */
- if (pipe_config->has_hdmi_sink &&
- drm_match_cea_mode(adjusted_mode) > 1)
- pipe_config->limited_color_range = true;
- } else {
- if (pipe_config->has_hdmi_sink &&
- intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED)
- pipe_config->limited_color_range = true;
- }
+ pipe_config->limited_color_range =
+ intel_sdvo_limited_color_range(encoder, pipe_config,
+ conn_state);
/* Clock computation needs to happen after pixel multiplier. */
if (IS_TV(intel_sdvo_connector))
@@ -1488,8 +1504,13 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
if (crtc_state->has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
+ crtc_state->limited_color_range ?
+ SDVO_COLORIMETRY_RGB220 :
SDVO_COLORIMETRY_RGB256);
intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
+ intel_sdvo_set_pixel_replication(intel_sdvo,
+ !!(adjusted_mode->flags &
+ DRM_MODE_FLAG_DBLCLK));
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1523,8 +1544,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
- sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
@@ -1682,8 +1701,11 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
- if (sdvox & HDMI_COLOR_RANGE_16_235)
- pipe_config->limited_color_range = true;
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY,
+ &val, 1)) {
+ if (val == SDVO_COLORIMETRY_RGB220)
+ pipe_config->limited_color_range = true;
+ }
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
&val, 1)) {
@@ -1843,17 +1865,26 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, connector->state);
+ int clock = mode->clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (intel_sdvo->pixel_clock_min > mode->clock)
- return MODE_CLOCK_LOW;
- if (intel_sdvo->pixel_clock_max < mode->clock)
+ if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
- if (mode->clock > max_dotclk)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (!has_hdmi_sink)
+ return MODE_CLOCK_LOW;
+ clock *= 2;
+ }
+
+ if (intel_sdvo->pixel_clock_min > clock)
+ return MODE_CLOCK_LOW;
+
+ if (intel_sdvo->pixel_clock_max < clock)
return MODE_CLOCK_HIGH;
if (IS_LVDS(intel_sdvo_connector)) {
@@ -1907,6 +1938,17 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
+static u8 intel_sdvo_get_colorimetry_cap(struct intel_sdvo *intel_sdvo)
+{
+ u8 cap;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY_CAP,
+ &cap, sizeof(cap)))
+ return SDVO_COLORIMETRY_RGB256;
+
+ return cap;
+}
+
static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
@@ -2093,8 +2135,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
return ret;
}
-static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
+ int num_modes = 0;
struct edid *edid;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2109,18 +2152,19 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* DDC fails, check to see if the analog output is disconnected, in
* which case we'll look there for the digital DDC data.
*/
- if (edid == NULL)
+ if (!edid)
edid = intel_sdvo_get_analog_edid(connector);
- if (edid != NULL) {
- if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
- edid)) {
- drm_connector_update_edid_property(connector, edid);
- drm_add_edid_modes(connector, edid);
- }
+ if (!edid)
+ return 0;
- kfree(edid);
- }
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid))
+ num_modes += intel_connector_update_modes(connector, edid);
+
+ kfree(edid);
+
+ return num_modes;
}
/*
@@ -2188,12 +2232,13 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
-static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
const struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_sdtv_resolution_request tv_res;
u32 reply = 0, format_map = 0;
+ int num_modes = 0;
int i;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2208,31 +2253,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
- return;
+ return 0;
BUILD_BUG_ON(sizeof(tv_res) != 3);
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res)))
- return;
+ return 0;
if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
- return;
+ return 0;
- for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) {
if (reply & (1 << i)) {
struct drm_display_mode *nmode;
nmode = drm_mode_duplicate(connector->dev,
&sdvo_tv_modes[i]);
- if (nmode)
+ if (nmode) {
drm_mode_probed_add(connector, nmode);
+ num_modes++;
+ }
}
+ }
+
+ return num_modes;
}
-static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+static int intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
+ int num_modes = 0;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -2249,6 +2300,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
newmode->type = (DRM_MODE_TYPE_PREFERRED |
DRM_MODE_TYPE_DRIVER);
drm_mode_probed_add(connector, newmode);
+ num_modes++;
}
}
@@ -2257,7 +2309,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+ num_modes += intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+ return num_modes;
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -2265,13 +2319,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
if (IS_TV(intel_sdvo_connector))
- intel_sdvo_get_tv_modes(connector);
+ return intel_sdvo_get_tv_modes(connector);
else if (IS_LVDS(intel_sdvo_connector))
- intel_sdvo_get_lvds_modes(connector);
+ return intel_sdvo_get_lvds_modes(connector);
else
- intel_sdvo_get_ddc_modes(connector);
-
- return !list_empty(&connector->probed_modes);
+ return intel_sdvo_get_ddc_modes(connector);
}
static int
@@ -2293,7 +2345,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
return 0;
}
- WARN_ON(1);
+ drm_WARN_ON(connector->dev, 1);
*val = 0;
} else if (property == intel_sdvo_connector->top ||
property == intel_sdvo_connector->bottom)
@@ -2662,12 +2714,9 @@ static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
-
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
+ if (intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220)
intel_attach_broadcast_rgb_property(&connector->base.base);
- }
intel_attach_aspect_ratio_property(&connector->base.base);
}
@@ -3308,6 +3357,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
+ intel_sdvo->colorimetry_cap =
+ intel_sdvo_get_colorimetry_cap(intel_sdvo);
+
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
index 13b9a8e257bb..74dc6c042b6e 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
@@ -705,10 +705,10 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
#define SDVO_CMD_SET_COLORIMETRY 0x8e
- #define SDVO_COLORIMETRY_RGB256 0x0
- #define SDVO_COLORIMETRY_RGB220 0x1
- #define SDVO_COLORIMETRY_YCrCb422 0x3
- #define SDVO_COLORIMETRY_YCrCb444 0x4
+ #define SDVO_COLORIMETRY_RGB256 (1 << 0)
+ #define SDVO_COLORIMETRY_RGB220 (1 << 1)
+ #define SDVO_COLORIMETRY_YCrCb422 (1 << 2)
+ #define SDVO_COLORIMETRY_YCrCb444 (1 << 3)
#define SDVO_CMD_GET_COLORIMETRY 0x8f
#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
#define SDVO_CMD_SET_AUDIO_STAT 0x91
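With the capability values now encoded as a bitmask, a sink's support for limited-range RGB can be tested with a simple bit check, as intel_sdvo_limited_color_range() does above. A minimal standalone sketch; only the two defines are mirrored from the patch, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define SDVO_COLORIMETRY_RGB256		(1 << 0)
#define SDVO_COLORIMETRY_RGB220		(1 << 1)

static bool limited_range_allowed(unsigned char colorimetry_cap)
{
	return (colorimetry_cap & SDVO_COLORIMETRY_RGB220) != 0;
}

int main(void)
{
	unsigned char cap = SDVO_COLORIMETRY_RGB256 | SDVO_COLORIMETRY_RGB220;

	printf("limited range %s\n",
	       limited_range_allowed(cap) ? "allowed" : "not advertised");
	return 0;
}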
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 0000ec7055f7..d03860fef2d7 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
@@ -333,6 +334,21 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
+static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
+{
+ if (IS_ROCKETLAKE(i915))
+ return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
+ else
+ return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
+}
+
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
+}
+
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
{
return INTEL_GEN(dev_priv) >= 11 &&
@@ -3003,7 +3019,7 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
if (icl_is_hdr_plane(dev_priv, plane_id)) {
*num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
return icl_hdr_plane_formats;
- } else if (icl_is_nv12_y_plane(plane_id)) {
+ } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
*num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
return icl_sdr_y_plane_formats;
} else {
@@ -3046,6 +3062,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
+ unsigned int supported_csc;
const u64 *modifiers;
const u32 *formats;
int num_formats;
@@ -3120,9 +3137,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0,
supported_rotations);
+ supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
+
drm_plane_create_color_properties(&plane->base,
- BIT(DRM_COLOR_YCBCR_BT601) |
- BIT(DRM_COLOR_YCBCR_BT709),
+ supported_csc,
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT709,
@@ -3136,6 +3157,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
+ if (INTEL_GEN(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&plane->base);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 5eeaa92420d1..cd2104ba1ca1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -32,21 +32,14 @@ struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
-static inline bool icl_is_nv12_y_plane(enum plane_id id)
-{
- /* Don't need to do a gen check, these planes are only available on gen11 */
- if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
- return true;
-
- return false;
-}
-
static inline u8 icl_hdr_plane_mask(void)
{
return BIT(PLANE_PRIMARY) |
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id);
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b161c15baf86..5b5dc86a5737 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -360,12 +360,12 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
}
if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
- !WARN_ON(dig_port->tc_legacy_port))
+ !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
goto out_set_tbt_alt_mode;
max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
if (dig_port->tc_legacy_port) {
- WARN_ON(max_lanes != 4);
+ drm_WARN_ON(&i915->drm, max_lanes != 4);
dig_port->tc_mode = TC_PORT_LEGACY;
return;
@@ -445,18 +445,20 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 live_status_mask = tc_port_live_status_mask(dig_port);
bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
enum tc_port_mode mode;
- if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
+ if (in_safe_mode ||
+ drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port)))
return TC_PORT_TBT_ALT;
mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
if (live_status_mask) {
enum tc_port_mode live_mode = fls(live_status_mask) - 1;
- if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
+ if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
mode = live_mode;
}
@@ -505,7 +507,9 @@ static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
int refcount)
{
- WARN_ON(dig_port->tc_link_refcount);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
dig_port->tc_link_refcount = refcount;
}
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index fbe12aad7d58..777032d9697b 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1038,9 +1038,6 @@ intel_tv_mode_to_mode(struct drm_display_mode *mode,
/* TV has its own notion of sync and other mode flags, so clear them. */
mode->flags = 0;
- mode->vrefresh = 0;
- mode->vrefresh = drm_mode_vrefresh(mode);
-
snprintf(mode->name, sizeof(mode->name),
"%dx%d%c (%s)",
mode->hdisplay, mode->vdisplay,
@@ -1161,7 +1158,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
/* pixel counter doesn't work on i965gm TV output */
if (IS_I965GM(dev_priv))
- adjusted_mode->private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
}
@@ -1331,7 +1328,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
/* pixel counter doesn't work on i965gm TV output */
if (IS_I965GM(dev_priv))
- adjusted_mode->private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index aef7fe932d1a..6faabd4f6d49 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -820,6 +820,7 @@ struct bdb_lfp_power {
u16 adb;
u16 lace_enabled_status;
struct agressiveness_profile_entry aggressivenes[16];
+ u16 hobl; /* 232+ */
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 95ad87d4ccb3..c5735c365659 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -476,13 +476,13 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
* POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
*
* - ICL eDP/DSI transcoder
- * - TGL pipe A
+ * - Gen12+ (except RKL) pipe A
*
* For any other pipe, VDSC/joining uses the power well associated with
* the pipe in use. Hence another reference on the pipe power domain
* will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A)
+ if (INTEL_GEN(i915) >= 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else if (is_pipe_dsc(crtc_state))
return POWER_DOMAIN_PIPE(pipe);
@@ -1045,7 +1045,7 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
@@ -1055,9 +1055,9 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
/* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
- intel_dig_port->write_infoframe(encoder, crtc_state,
- DP_SDP_PPS, &dp_dsc_pps_sdp,
- sizeof(dp_dsc_pps_sdp));
+ dig_port->write_infoframe(encoder, crtc_state,
+ DP_SDP_PPS, &dp_dsc_pps_sdp,
+ sizeof(dp_dsc_pps_sdp));
}
void intel_dsc_enable(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f582ab52f0b0..052e0b31a2da 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -298,7 +298,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv)) {
/* Enable Frame time stamp based scanline reporting */
- adjusted_mode->private_flags |=
+ pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
/* Dual link goes to DSI transcoder A. */
@@ -1097,8 +1097,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
 /* Enable Frame time stamp based scanline reporting */
- adjusted_mode->private_flags |=
- I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
+ pipe_config->mode_flags |=
+ I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index d3a86a4d5c04..278664f831e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -32,16 +32,17 @@ static void vma_clear_pages(struct i915_vma *vma)
vma->pages = NULL;
}
-static int vma_bind(struct i915_vma *vma,
+static int vma_bind(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
+ return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
}
-static void vma_unbind(struct i915_vma *vma)
+static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
- vma->vm->vma_ops.unbind_vma(vma);
+ vm->vma_ops.unbind_vma(vm, vma);
}
static const struct i915_vma_ops proxy_vma_ops = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 30c229fcb404..d0bdb6d447ed 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -101,8 +101,7 @@ static void lut_close(struct i915_gem_context *ctx)
struct radix_tree_iter iter;
void __rcu **slot;
- lockdep_assert_held(&ctx->mutex);
-
+ mutex_lock(&ctx->lut_mutex);
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
@@ -112,8 +111,7 @@ static void lut_close(struct i915_gem_context *ctx)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- rcu_read_unlock();
- i915_gem_object_lock(obj);
+ spin_lock(&obj->lut_lock);
list_for_each_entry(lut, &obj->lut_list, obj_link) {
if (lut->ctx != ctx)
continue;
@@ -124,8 +122,7 @@ static void lut_close(struct i915_gem_context *ctx)
list_del(&lut->obj_link);
break;
}
- i915_gem_object_unlock(obj);
- rcu_read_lock();
+ spin_unlock(&obj->lut_lock);
if (&lut->obj_link != &obj->lut_list) {
i915_lut_handle_free(lut);
@@ -137,6 +134,7 @@ static void lut_close(struct i915_gem_context *ctx)
i915_gem_object_put(obj);
}
rcu_read_unlock();
+ mutex_unlock(&ctx->lut_mutex);
}
static struct intel_context *
@@ -344,6 +342,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
spin_unlock(&ctx->i915->gem.contexts.lock);
mutex_destroy(&ctx->engines_mutex);
+ mutex_destroy(&ctx->lut_mutex);
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
@@ -650,7 +649,7 @@ static void context_close(struct i915_gem_context *ctx)
* context close.
*/
if (!i915_gem_context_is_persistent(ctx) ||
- !i915_modparams.enable_hangcheck)
+ !ctx->i915->params.enable_hangcheck)
kill_context(ctx);
i915_gem_context_put(ctx);
@@ -667,7 +666,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
* reset] are allowed to survive past termination. We require
* hangcheck to ensure that the persistent requests are healthy.
*/
- if (!i915_modparams.enable_hangcheck)
+ if (!ctx->i915->params.enable_hangcheck)
return -EINVAL;
i915_gem_context_set_persistence(ctx);
@@ -727,6 +726,7 @@ __create_context(struct drm_i915_private *i915)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+ mutex_init(&ctx->lut_mutex);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -1314,11 +1314,11 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (vm == rcu_access_pointer(ctx->vm))
goto unlock;
+ old = __set_ppgtt(ctx, vm);
+
/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
lut_close(ctx);
- old = __set_ppgtt(ctx, vm);
-
/*
* We need to flush any requests using the current ppgtt before
* we release it as the requests do not hold a reference themselves,
@@ -1332,6 +1332,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (err) {
i915_vm_close(__set_ppgtt(ctx, old));
i915_vm_close(old);
+ lut_close(ctx); /* force a rebuild of the old obj:vma cache */
}
unlock:
@@ -1399,11 +1400,12 @@ static int get_ringsize(struct i915_gem_context *ctx,
}
int
-i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+i915_gem_user_to_context_sseu(struct intel_gt *gt,
const struct drm_i915_gem_context_param_sseu *user,
struct intel_sseu *context)
{
- const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
+ const struct sseu_dev_info *device = &gt->info.sseu;
+ struct drm_i915_private *i915 = gt->i915;
/* No zeros in any field. */
if (!user->slice_mask || !user->subslice_mask ||
@@ -1536,7 +1538,7 @@ static int set_sseu(struct i915_gem_context *ctx,
goto out_ce;
}
- ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
+ ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
if (ret)
goto out_ce;
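
[Note] Taken together, the context changes above split the old use of ctx->mutex into two narrower locks: ctx->lut_mutex serialises updates to the handles_vma radix tree, while each object's lut_list is guarded by a new per-object obj->lut_lock spinlock. A simplified sketch of the insertion side (condensed from __eb_add_lut above, omitting the interruptible lock and the closed-context/set-vm checks):

        mutex_lock(&ctx->lut_mutex);
        err = radix_tree_insert(&ctx->handles_vma, handle, vma);
        if (!err) {
                spin_lock(&obj->lut_lock);
                list_add(&lut->obj_link, &obj->lut_list);
                spin_unlock(&obj->lut_lock);
        }
        mutex_unlock(&ctx->lut_mutex);
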
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 3702b2fb27ab..a133f92bbedb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -225,7 +225,7 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
-int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+int i915_gem_user_to_context_sseu(struct intel_gt *gt,
const struct drm_i915_gem_context_param_sseu *user,
struct intel_sseu *context);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 28760bd03265..ae14ca24a11f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -170,6 +170,7 @@ struct i915_gem_context {
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
+ struct mutex lut_mutex;
/**
* @name: arbitrary name, used for user debug
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 7db5a793739d..2679380159fc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -217,6 +217,7 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .name = "i915_gem_object_dmabuf",
.get_pages = i915_gem_object_get_pages_dmabuf,
.put_pages = i915_gem_object_put_pages_dmabuf,
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index db8eb1c6afe9..6b4ec66cb558 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -45,13 +45,6 @@ struct eb_vma_array {
struct eb_vma vma[];
};
-enum {
- FORCE_CPU_RELOC = 1,
- FORCE_GTT_RELOC,
- FORCE_GPU_RELOC,
-#define DBG_FORCE_RELOC 0 /* choose one of the above! */
-};
-
#define __EXEC_OBJECT_HAS_PIN BIT(31)
#define __EXEC_OBJECT_HAS_FENCE BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
@@ -260,8 +253,6 @@ struct i915_execbuffer {
*/
struct reloc_cache {
struct drm_mm_node node; /** temporary GTT binding */
- unsigned long vaddr; /** Current kmap address */
- unsigned long page; /** Currently mapped page index */
unsigned int gen; /** Cached value of INTEL_GEN */
bool use_64bit_reloc : 1;
bool has_llc : 1;
@@ -605,23 +596,6 @@ eb_add_vma(struct i915_execbuffer *eb,
}
}
-static inline int use_cpu_reloc(const struct reloc_cache *cache,
- const struct drm_i915_gem_object *obj)
-{
- if (!i915_gem_object_has_struct_page(obj))
- return false;
-
- if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
- return true;
-
- if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
- return false;
-
- return (cache->has_llc ||
- obj->cache_dirty ||
- obj->cache_level != I915_CACHE_NONE);
-}
-
static int eb_reserve_vma(const struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
@@ -808,23 +782,28 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
/* Check that the context hasn't been closed in the meantime */
err = -EINTR;
- if (!mutex_lock_interruptible(&ctx->mutex)) {
- err = -ENOENT;
- if (likely(!i915_gem_context_is_closed(ctx)))
+ if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
+ struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
+
+ if (unlikely(vm && vma->vm != vm))
+ err = -EAGAIN; /* user racing with ctx set-vm */
+ else if (likely(!i915_gem_context_is_closed(ctx)))
err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+ else
+ err = -ENOENT;
if (err == 0) { /* And nor has this handle */
struct drm_i915_gem_object *obj = vma->obj;
- i915_gem_object_lock(obj);
+ spin_lock(&obj->lut_lock);
if (idr_find(&eb->file->object_idr, handle) == obj) {
list_add(&lut->obj_link, &obj->lut_list);
} else {
radix_tree_delete(&ctx->handles_vma, handle);
err = -ENOENT;
}
- i915_gem_object_unlock(obj);
+ spin_unlock(&obj->lut_lock);
}
- mutex_unlock(&ctx->mutex);
+ mutex_unlock(&ctx->lut_mutex);
}
if (unlikely(err))
goto err;
@@ -840,6 +819,8 @@ err:
static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
{
+ struct i915_address_space *vm = eb->context->vm;
+
do {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -847,7 +828,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
rcu_read_lock();
vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
- if (likely(vma))
+ if (likely(vma && vma->vm == vm))
vma = i915_vma_tryget(vma);
rcu_read_unlock();
if (likely(vma))
@@ -857,7 +838,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
if (unlikely(!obj))
return ERR_PTR(-ENOENT);
- vma = i915_vma_instance(obj, eb->context->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
@@ -945,8 +926,6 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
static void reloc_cache_init(struct reloc_cache *cache,
struct drm_i915_private *i915)
{
- cache->page = -1;
- cache->vaddr = 0;
/* Must be a variable in the struct to allow GCC to unroll. */
cache->gen = INTEL_GEN(i915);
cache->has_llc = HAS_LLC(i915);
@@ -958,25 +937,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->target = NULL;
}
-static inline void *unmask_page(unsigned long p)
-{
- return (void *)(uintptr_t)(p & PAGE_MASK);
-}
-
-static inline unsigned int unmask_flags(unsigned long p)
-{
- return p & ~PAGE_MASK;
-}
-
-#define KMAP 0x4 /* after CLFLUSH_FLAGS */
-
-static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
-{
- struct drm_i915_private *i915 =
- container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
- return &i915->ggtt;
-}
-
#define RELOC_TAIL 4
static int reloc_gpu_chain(struct reloc_cache *cache)
@@ -1089,181 +1049,6 @@ static int reloc_gpu_flush(struct reloc_cache *cache)
return err;
}
-static void reloc_cache_reset(struct reloc_cache *cache)
-{
- void *vaddr;
-
- if (!cache->vaddr)
- return;
-
- vaddr = unmask_page(cache->vaddr);
- if (cache->vaddr & KMAP) {
- if (cache->vaddr & CLFLUSH_AFTER)
- mb();
-
- kunmap_atomic(vaddr);
- i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
- } else {
- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
-
- intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- io_mapping_unmap_atomic((void __iomem *)vaddr);
-
- if (drm_mm_node_allocated(&cache->node)) {
- ggtt->vm.clear_range(&ggtt->vm,
- cache->node.start,
- cache->node.size);
- mutex_lock(&ggtt->vm.mutex);
- drm_mm_remove_node(&cache->node);
- mutex_unlock(&ggtt->vm.mutex);
- } else {
- i915_vma_unpin((struct i915_vma *)cache->node.mm);
- }
- }
-
- cache->vaddr = 0;
- cache->page = -1;
-}
-
-static void *reloc_kmap(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
- unsigned long page)
-{
- void *vaddr;
-
- if (cache->vaddr) {
- kunmap_atomic(unmask_page(cache->vaddr));
- } else {
- unsigned int flushes;
- int err;
-
- err = i915_gem_object_prepare_write(obj, &flushes);
- if (err)
- return ERR_PTR(err);
-
- BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
- BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
-
- cache->vaddr = flushes | KMAP;
- cache->node.mm = (void *)obj;
- if (flushes)
- mb();
- }
-
- vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
- cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
- cache->page = page;
-
- return vaddr;
-}
-
-static void *reloc_iomap(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
- unsigned long page)
-{
- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
- unsigned long offset;
- void *vaddr;
-
- if (cache->vaddr) {
- intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
- } else {
- struct i915_vma *vma;
- int err;
-
- if (i915_gem_object_is_tiled(obj))
- return ERR_PTR(-EINVAL);
-
- if (use_cpu_reloc(cache, obj))
- return NULL;
-
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
- if (err)
- return ERR_PTR(err);
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
- if (IS_ERR(vma)) {
- memset(&cache->node, 0, sizeof(cache->node));
- mutex_lock(&ggtt->vm.mutex);
- err = drm_mm_insert_node_in_range
- (&ggtt->vm.mm, &cache->node,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- mutex_unlock(&ggtt->vm.mutex);
- if (err) /* no inactive aperture space, use cpu reloc */
- return NULL;
- } else {
- cache->node.start = vma->node.start;
- cache->node.mm = (void *)vma;
- }
- }
-
- offset = cache->node.start;
- if (drm_mm_node_allocated(&cache->node)) {
- ggtt->vm.insert_page(&ggtt->vm,
- i915_gem_object_get_dma_address(obj, page),
- offset, I915_CACHE_NONE, 0);
- } else {
- offset += page << PAGE_SHIFT;
- }
-
- vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
- offset);
- cache->page = page;
- cache->vaddr = (unsigned long)vaddr;
-
- return vaddr;
-}
-
-static void *reloc_vaddr(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
- unsigned long page)
-{
- void *vaddr;
-
- if (cache->page == page) {
- vaddr = unmask_page(cache->vaddr);
- } else {
- vaddr = NULL;
- if ((cache->vaddr & KMAP) == 0)
- vaddr = reloc_iomap(obj, cache, page);
- if (!vaddr)
- vaddr = reloc_kmap(obj, cache, page);
- }
-
- return vaddr;
-}
-
-static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
-{
- if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
- if (flushes & CLFLUSH_BEFORE) {
- clflushopt(addr);
- mb();
- }
-
- *addr = value;
-
- /*
- * Writes to the same cacheline are serialised by the CPU
- * (including clflush). On the write path, we only require
- * that it hits memory in an orderly fashion and place
- * mb barriers at the start and end of the relocation phase
- * to ensure ordering of clflush wrt to the system.
- */
- if (flushes & CLFLUSH_AFTER)
- clflushopt(addr);
- } else
- *addr = value;
-}
-
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -1429,17 +1214,6 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return cmd;
}
-static inline bool use_reloc_gpu(struct i915_vma *vma)
-{
- if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
- return true;
-
- if (DBG_FORCE_RELOC)
- return false;
-
- return !dma_resv_test_signaled_rcu(vma->resv, true);
-}
-
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
{
struct page *page;
@@ -1454,10 +1228,10 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return addr + offset_in_page(offset);
}
-static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
- struct i915_vma *vma,
- u64 offset,
- u64 target_addr)
+static int __reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
{
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
@@ -1473,7 +1247,7 @@ static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
- return false;
+ return PTR_ERR(batch);
addr = gen8_canonical_addr(vma->node.start + offset);
if (gen >= 8) {
@@ -1522,55 +1296,21 @@ static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = target_addr;
}
- return true;
-}
-
-static bool reloc_entry_gpu(struct i915_execbuffer *eb,
- struct i915_vma *vma,
- u64 offset,
- u64 target_addr)
-{
- if (eb->reloc_cache.vaddr)
- return false;
-
- if (!use_reloc_gpu(vma))
- return false;
-
- return __reloc_entry_gpu(eb, vma, offset, target_addr);
+ return 0;
}
static u64
-relocate_entry(struct i915_vma *vma,
+relocate_entry(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
const struct drm_i915_gem_relocation_entry *reloc,
- struct i915_execbuffer *eb,
const struct i915_vma *target)
{
u64 target_addr = relocation_target(reloc, target);
- u64 offset = reloc->offset;
-
- if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
- bool wide = eb->reloc_cache.use_64bit_reloc;
- void *vaddr;
-
-repeat:
- vaddr = reloc_vaddr(vma->obj,
- &eb->reloc_cache,
- offset >> PAGE_SHIFT);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
-
- GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
- clflush_write32(vaddr + offset_in_page(offset),
- lower_32_bits(target_addr),
- eb->reloc_cache.vaddr);
-
- if (wide) {
- offset += sizeof(u32);
- target_addr >>= 32;
- wide = false;
- goto repeat;
- }
- }
+ int err;
+
+ err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
+ if (err)
+ return err;
return target->node.start | UPDATE;
}
@@ -1626,8 +1366,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
- if (WARN_ONCE(err,
- "Unexpected failure to bind target VMA!"))
+ if (err)
return err;
}
}
@@ -1636,8 +1375,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* If the relocation already has the right value in it, no
* more work needs to be done.
*/
- if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+ if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@@ -1669,7 +1407,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(ev->vma, reloc, eb, target->vma);
+ return relocate_entry(eb, ev->vma, reloc, target->vma);
}
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
@@ -1707,10 +1445,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* this is bad and so lockdep complains vehemently.
*/
copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
- if (unlikely(copied)) {
- remain = -EFAULT;
- goto out;
- }
+ if (unlikely(copied))
+ return -EFAULT;
remain -= count;
do {
@@ -1718,8 +1454,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
- remain = (int)offset;
- goto out;
+ return (int)offset;
} else {
/*
* Note that reporting an error now
@@ -1749,9 +1484,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
-out:
- reloc_cache_reset(&eb->reloc_cache);
- return remain;
+
+ return 0;
}
static int eb_relocate(struct i915_execbuffer *eb)
@@ -1911,8 +1645,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
- drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
+ if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
+ drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2246,8 +1980,7 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
static int num_vcs_engines(const struct drm_i915_private *i915)
{
- return hweight64(INTEL_INFO(i915)->engine_mask &
- GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
+ return hweight64(VDBOX_MASK(&i915->gt));
}
/*
@@ -2659,7 +2392,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.i915 = i915;
eb.file = file;
eb.args = args;
- if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
+ if (!(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
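
[Note] The execbuffer changes above remove the CPU/GTT relocation fallbacks (reloc_kmap/reloc_iomap and the reloc_cache vaddr/page tracking) so relocations are always written by the GPU, and they make the cached handle-to-vma lookup tolerant of a concurrent ctx set-vm by checking the vma's address space. The resulting fast path looks roughly like this (condensed from eb_lookup_vma above):

        struct i915_address_space *vm = eb->context->vm;
        struct i915_vma *vma;

        rcu_read_lock();
        vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
        if (likely(vma && vma->vm == vm))       /* skip stale entries left behind by set-vm */
                vma = i915_vma_tryget(vma);
        rcu_read_unlock();
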
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index cbbff81aa0af..ad22f42541bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -137,6 +137,7 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
+ .name = "i915_gem_object_internal",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_internal,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 70543c83df06..932ee21e6609 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+ .name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
.get_pages = i915_gem_object_get_pages_buddy,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index fe45bd4d63a5..b23368529a40 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -216,12 +216,12 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case -ENXIO: /* unable to access backing store (on device) */
return VM_FAULT_SIGBUS;
- case -ENOSPC: /* shmemfs allocation failure */
case -ENOMEM: /* our allocation failure */
return VM_FAULT_OOM;
case 0:
case -EAGAIN:
+ case -ENOSPC: /* transient failure to evict? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
@@ -448,7 +448,7 @@ void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
* mapping will then trigger a page fault on the next user access, allowing
* fixup by vm_fault_gtt().
*/
-static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
@@ -507,19 +507,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
spin_unlock(&obj->mmo.lock);
}
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_release_mmap_gtt(obj);
- i915_gem_object_release_mmap_offset(obj);
-}
-
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 862e01b7cb69..efee9e0d2508 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -25,7 +25,8 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
u32 handle, u64 *offset);
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 99356c00c19e..c8421fd9d2dc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -53,7 +53,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops,
struct lock_class_key *key)
{
- __mutex_init(&obj->mm.lock, "obj->mm.lock", key);
+ __mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
@@ -61,6 +61,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->mm.link);
INIT_LIST_HEAD(&obj->lut_list);
+ spin_lock_init(&obj->lut_lock);
spin_lock_init(&obj->mmo.lock);
obj->mmo.offsets = RB_ROOT;
@@ -72,6 +73,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
+
+ if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
+ i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
+ &obj->mm.lock);
}
/**
@@ -100,21 +105,29 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_lut_handle bookmark = {};
struct i915_mmap_offset *mmo, *mn;
struct i915_lut_handle *lut, *ln;
LIST_HEAD(close);
- i915_gem_object_lock(obj);
+ spin_lock(&obj->lut_lock);
list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
struct i915_gem_context *ctx = lut->ctx;
- if (ctx->file_priv != fpriv)
- continue;
+ if (ctx && ctx->file_priv == fpriv) {
+ i915_gem_context_get(ctx);
+ list_move(&lut->obj_link, &close);
+ }
- i915_gem_context_get(ctx);
- list_move(&lut->obj_link, &close);
+ /* Break long locks, and carefully continue on from this spot */
+ if (&ln->obj_link != &obj->lut_list) {
+ list_add_tail(&bookmark.obj_link, &ln->obj_link);
+ if (cond_resched_lock(&obj->lut_lock))
+ list_safe_reset_next(&bookmark, ln, obj_link);
+ __list_del_entry(&bookmark.obj_link);
+ }
}
- i915_gem_object_unlock(obj);
+ spin_unlock(&obj->lut_lock);
spin_lock(&obj->mmo.lock);
rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
@@ -130,14 +143,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
* vma, in the same fd namespace, by virtue of flink/open.
*/
- mutex_lock(&ctx->mutex);
+ mutex_lock(&ctx->lut_mutex);
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
if (vma) {
GEM_BUG_ON(vma->obj != obj);
GEM_BUG_ON(!atomic_read(&vma->open_count));
i915_vma_close(vma);
}
- mutex_unlock(&ctx->mutex);
+ mutex_unlock(&ctx->lut_mutex);
i915_gem_context_put(lut->ctx);
i915_lut_handle_free(lut);
@@ -158,14 +171,35 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
atomic_dec(&i915->mm.free_count);
}
+static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
+{
+ /* Skip serialisation and waking the device if known to be not used. */
+
+ if (obj->userfault_count)
+ i915_gem_object_release_mmap_gtt(obj);
+
+ if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
+ struct i915_mmap_offset *mmo, *mn;
+
+ i915_gem_object_release_mmap_offset(obj);
+
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets,
+ offset) {
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ }
+ obj->mmo.offsets = RB_ROOT;
+ }
+}
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_mmap_offset *mmo, *mn;
-
trace_i915_gem_object_destroy(obj);
if (!list_empty(&obj->vma.list)) {
@@ -191,18 +225,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
spin_unlock(&obj->vma.lock);
}
- i915_gem_object_release_mmap(obj);
-
- rbtree_postorder_for_each_entry_safe(mmo, mn,
- &obj->mmo.offsets,
- offset) {
- drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
- &mmo->vma_node);
- kfree(mmo);
- }
- obj->mmo.offsets = RB_ROOT;
+ __i915_gem_object_free_mmaps(obj);
- GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
atomic_set(&obj->mm.pages_pin_count, 0);
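
[Note] i915_gem_close_object() now walks obj->lut_list under the new spinlock, and since that list can be long it periodically drops the lock using a "bookmark" entry so the walk can resume where it left off. A condensed sketch of the pattern used above:

        struct i915_lut_handle bookmark = {};
        struct i915_lut_handle *lut, *ln;

        spin_lock(&obj->lut_lock);
        list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
                /* ... examine or move lut ... */

                if (&ln->obj_link != &obj->lut_list) {
                        list_add_tail(&bookmark.obj_link, &ln->obj_link);
                        if (cond_resched_lock(&obj->lut_lock))
                                list_safe_reset_next(&bookmark, ln, obj_link);
                        __list_del_entry(&bookmark.obj_link);
                }
        }
        spin_unlock(&obj->lut_lock);
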
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2faa481cc18f..e5b9276d254c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -258,10 +258,6 @@ struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
unsigned int n);
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
-
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
@@ -394,6 +390,8 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
+
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index f457d7130491..bfdb32d46877 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -126,6 +126,17 @@ void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
intel_engine_pm_put(ce->engine);
}
+static int
+move_obj_to_gpu(struct drm_i915_gem_object *obj,
+ struct i915_request *rq,
+ bool write)
+{
+ if (obj->cache_dirty & ~obj->cache_coherent)
+ i915_gem_clflush_object(obj, 0);
+
+ return i915_request_await_object(rq, obj, write);
+}
+
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value)
@@ -143,12 +154,6 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
if (unlikely(err))
return err;
- if (obj->cache_dirty & ~obj->cache_coherent) {
- i915_gem_object_lock(obj);
- i915_gem_clflush_object(obj, 0);
- i915_gem_object_unlock(obj);
- }
-
batch = intel_emit_vma_fill_blt(ce, vma, value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -165,27 +170,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
if (unlikely(err))
goto out_request;
- err = i915_request_await_object(rq, obj, true);
- if (unlikely(err))
- goto out_request;
-
- if (ce->engine->emit_init_breadcrumb) {
- err = ce->engine->emit_init_breadcrumb(rq);
- if (unlikely(err))
- goto out_request;
- }
-
i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, true);
+ err = move_obj_to_gpu(vma->obj, rq, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (unlikely(err))
goto out_request;
- err = ce->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
+ if (ce->engine->emit_init_breadcrumb)
+ err = ce->engine->emit_init_breadcrumb(rq);
+
+ if (likely(!err))
+ err = ce->engine->emit_bb_start(rq,
+ batch->node.start,
+ batch->node.size,
+ 0);
out_request:
if (unlikely(err))
i915_request_set_error_once(rq, err);
@@ -317,16 +317,6 @@ out_pm:
return ERR_PTR(err);
}
-static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
-{
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (obj->cache_dirty & ~obj->cache_coherent)
- i915_gem_clflush_object(obj, 0);
-
- return i915_request_await_object(rq, obj, write);
-}
-
int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct intel_context *ce)
@@ -375,7 +365,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
goto out_request;
for (i = 0; i < ARRAY_SIZE(vma); i++) {
- err = move_to_gpu(vma[i], rq, i);
+ err = move_obj_to_gpu(vma[i]->obj, rq, i);
if (unlikely(err))
goto out_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 54ee658bb168..5335f799b548 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -61,6 +61,8 @@ struct drm_i915_gem_object_ops {
int (*dmabuf_export)(struct drm_i915_gem_object *obj);
void (*release)(struct drm_i915_gem_object *obj);
+
+ const char *name; /* friendly name for debug, e.g. lockdep classes */
};
enum i915_mmap_type {
@@ -119,6 +121,7 @@ struct drm_i915_gem_object {
* this translation from object to context->handles_vma.
*/
struct list_head lut_list;
+ spinlock_t lut_lock; /* guards lut_list */
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index af9e48ee4a33..7050519c87a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -408,6 +408,21 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
}
}
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!obj->mm.mapping);
+
+ /*
+ * We allow removing the mapping from underneath pinned pages!
+ *
+ * Furthermore, since this is an unsafe operation reserved only
+ * for construction time manipulation, we ignore locking prudence.
+ */
+ unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
+
+ i915_gem_object_unpin_map(obj);
+}
+
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n,
@@ -533,20 +548,6 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
return nth_page(sg_page(sg), offset);
}
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
-{
- struct page *page;
-
- page = i915_gem_object_get_page(obj, n);
- if (!obj->mm.dirty)
- set_page_dirty(page);
-
- return page;
-}
-
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 7fe9831aa9ba..28147aab47b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -27,7 +27,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
void *dst;
int i;
- if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
/*
@@ -140,6 +140,7 @@ static void phys_release(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+ .name = "i915_gem_object_phys",
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 7aff3514d97a..38113d3c0138 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -147,8 +147,7 @@ rebuild_st:
last_pfn = page_to_pfn(page);
/* Check that the i965g/gm workaround works. */
- drm_WARN_ON(&i915->drm,
- (gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+ GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
}
if (sg) { /* loop terminated early; short sg table */
sg_page_sizes |= sg->length;
@@ -430,6 +429,7 @@ static void shmem_release(struct drm_i915_gem_object *obj)
}
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
+ .name = "i915_gem_object_shmem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 5b65ce738b16..dc8f052a0ffe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -13,6 +13,8 @@
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
+#include "gt/intel_gt_requests.h"
+
#include "i915_trace.h"
static bool swap_available(void)
@@ -111,15 +113,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
unsigned long count = 0;
unsigned long scanned = 0;
- /*
- * When shrinking the active list, we should also consider active
- * contexts. Active contexts are pinned until they are retired, and
- * so can not be simply unbound to retire and unpin their pages. To
- * shrink the contexts, we must wait until the gpu is idle and
- * completed its switch to the kernel context. In short, we do
- * not have a good mechanism for idling a specific context.
- */
-
trace_i915_gem_shrink(i915, target, shrink);
/*
@@ -134,6 +127,20 @@ i915_gem_shrink(struct drm_i915_private *i915,
}
/*
+ * When shrinking the active list, we should also consider active
+ * contexts. Active contexts are pinned until they are retired, and
+ * so can not be simply unbound to retire and unpin their pages. To
+ * shrink the contexts, we must wait until the gpu is idle and
+ * completed its switch to the kernel context. In short, we do
+ * not have a good mechanism for idling a specific context, but
+ * what we can do is give them a kick so that we do not keep idle
+ * contexts around longer than is necessary.
+ */
+ if (shrink & I915_SHRINK_ACTIVE)
+ /* Retire requests to unpin all idle contexts */
+ intel_gt_retire_requests(&i915->gt);
+
+ /*
* As we may completely rewrite the (un)bound list whilst unbinding
* (due to retiring requests) we have to strictly process only
* one element of the list at the time, and recheck the list
@@ -408,26 +415,15 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex)
{
- bool unlock = false;
-
if (!IS_ENABLED(CONFIG_LOCKDEP))
return;
- if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
- mutex_acquire(&i915->drm.struct_mutex.dep_map,
- I915_MM_NORMAL, 0, _RET_IP_);
- unlock = true;
- }
-
fs_reclaim_acquire(GFP_KERNEL);
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
mutex_release(&mutex->dep_map, _RET_IP_);
fs_reclaim_release(GFP_KERNEL);
-
- if (unlock)
- mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
}
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
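
[Note] With struct_mutex out of the picture, i915_gem_shrinker_taints_mutex() reduces to the core lockdep trick: perform a pretend acquire/release of the mutex inside an fs_reclaim section once at init time, so lockdep records that the mutex may be taken from the shrinker and will flag any later inversion. The annotation is just:

        fs_reclaim_acquire(GFP_KERNEL);
        mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
        mutex_release(&mutex->dep_map, _RET_IP_);
        fs_reclaim_release(GFP_KERNEL);

i915_gem_object_init() above now applies this to obj->mm.lock for shrinkable objects when CONFIG_LOCKDEP is enabled.
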
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index dc250278bd2c..e0f21f12d3ce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -566,6 +566,7 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+ .name = "i915_gem_object_stolen",
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
.release = i915_gem_object_release_stolen,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 0158e49bf9bb..ff72ee2fd9cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -299,7 +299,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
i915_gem_object_unlock(obj);
/* Force the fence to be reacquired for GTT access */
- i915_gem_object_release_mmap(obj);
+ i915_gem_object_release_mmap_gtt(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index c31a6744daee..2c2bf24140c9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -21,7 +21,7 @@ struct i915_mm_struct {
struct i915_mmu_notifier *mn;
struct hlist_node node;
struct kref kref;
- struct work_struct work;
+ struct rcu_work work;
};
#if defined(CONFIG_MMU_NOTIFIER)
@@ -189,40 +189,31 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
- struct i915_mmu_notifier *mn;
- int err = 0;
+ struct i915_mmu_notifier *mn, *old;
+ int err;
- mn = mm->mn;
- if (mn)
+ mn = READ_ONCE(mm->mn);
+ if (likely(mn))
return mn;
mn = i915_mmu_notifier_create(mm);
if (IS_ERR(mn))
- err = PTR_ERR(mn);
-
- mmap_write_lock(mm->mm);
- mutex_lock(&mm->i915->mm_lock);
- if (mm->mn == NULL && !err) {
- /* Protected by mmap_lock (write-lock) */
- err = __mmu_notifier_register(&mn->mn, mm->mm);
- if (!err) {
- /* Protected by mm_lock */
- mm->mn = fetch_and_zero(&mn);
- }
- } else if (mm->mn) {
- /*
- * Someone else raced and successfully installed the mmu
- * notifier, we can cancel our own errors.
- */
- err = 0;
+ return mn;
+
+ err = mmu_notifier_register(&mn->mn, mm->mm);
+ if (err) {
+ kfree(mn);
+ return ERR_PTR(err);
}
- mutex_unlock(&mm->i915->mm_lock);
- mmap_write_unlock(mm->mm);
- if (mn && !IS_ERR(mn))
+ old = cmpxchg(&mm->mn, NULL, mn);
+ if (old) {
+ mmu_notifier_unregister(&mn->mn, mm->mm);
kfree(mn);
+ mn = old;
+ }
- return err ? ERR_PTR(err) : mm->mn;
+ return mn;
}
static int
@@ -235,7 +226,7 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
- if (WARN_ON(obj->userptr.mm == NULL))
+ if (GEM_WARN_ON(!obj->userptr.mm))
return -EINVAL;
mn = i915_mmu_notifier_find(obj->userptr.mm);
@@ -301,23 +292,28 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
#endif
static struct i915_mm_struct *
-__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+__i915_mm_struct_find(struct drm_i915_private *i915, struct mm_struct *real)
{
- struct i915_mm_struct *mm;
-
- /* Protected by dev_priv->mm_lock */
- hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
- if (mm->mm == real)
- return mm;
+ struct i915_mm_struct *it, *mm = NULL;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(i915->mm_structs,
+ it, node,
+ (unsigned long)real)
+ if (it->mm == real && kref_get_unless_zero(&it->kref)) {
+ mm = it;
+ break;
+ }
+ rcu_read_unlock();
- return NULL;
+ return mm;
}
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_mm_struct *mm;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_mm_struct *mm, *new;
int ret = 0;
/* During release of the GEM object we hold the struct_mutex. This
@@ -330,39 +326,42 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
* struct_mutex, i.e. we need to schedule a worker to do the clean
* up.
*/
- mutex_lock(&dev_priv->mm_lock);
- mm = __i915_mm_struct_find(dev_priv, current->mm);
- if (mm == NULL) {
- mm = kmalloc(sizeof(*mm), GFP_KERNEL);
- if (mm == NULL) {
- ret = -ENOMEM;
- goto out;
- }
+ mm = __i915_mm_struct_find(i915, current->mm);
+ if (mm)
+ goto out;
- kref_init(&mm->kref);
- mm->i915 = to_i915(obj->base.dev);
+ new = kmalloc(sizeof(*mm), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
- mm->mm = current->mm;
+ kref_init(&new->kref);
+ new->i915 = to_i915(obj->base.dev);
+ new->mm = current->mm;
+ new->mn = NULL;
+
+ spin_lock(&i915->mm_lock);
+ mm = __i915_mm_struct_find(i915, current->mm);
+ if (!mm) {
+ hash_add_rcu(i915->mm_structs,
+ &new->node,
+ (unsigned long)new->mm);
mmgrab(current->mm);
+ mm = new;
+ }
+ spin_unlock(&i915->mm_lock);
+ if (mm != new)
+ kfree(new);
- mm->mn = NULL;
-
- /* Protected by dev_priv->mm_lock */
- hash_add(dev_priv->mm_structs,
- &mm->node, (unsigned long)mm->mm);
- } else
- kref_get(&mm->kref);
-
- obj->userptr.mm = mm;
out:
- mutex_unlock(&dev_priv->mm_lock);
+ obj->userptr.mm = mm;
return ret;
}
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
- struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+ struct i915_mm_struct *mm = container_of(work, typeof(*mm), work.work);
+
i915_mmu_notifier_free(mm->mn, mm->mm);
mmdrop(mm->mm);
kfree(mm);
@@ -373,12 +372,12 @@ __i915_mm_struct_free(struct kref *kref)
{
struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
- /* Protected by dev_priv->mm_lock */
- hash_del(&mm->node);
- mutex_unlock(&mm->i915->mm_lock);
+ spin_lock(&mm->i915->mm_lock);
+ hash_del_rcu(&mm->node);
+ spin_unlock(&mm->i915->mm_lock);
- INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
- queue_work(mm->i915->mm.userptr_wq, &mm->work);
+ INIT_RCU_WORK(&mm->work, __i915_mm_struct_free__worker);
+ queue_rcu_work(system_wq, &mm->work);
}
static void
@@ -387,9 +386,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
if (obj->userptr.mm == NULL)
return;
- kref_put_mutex(&obj->userptr.mm->kref,
- __i915_mm_struct_free,
- &to_i915(obj->base.dev)->mm_lock);
+ kref_put(&obj->userptr.mm->kref, __i915_mm_struct_free);
obj->userptr.mm = NULL;
}
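
[Note] The userptr rework above replaces the dev_priv->mm_lock mutex with a lock-free publish: the mmu notifier is registered first and then installed with cmpxchg(), and the loser of the race unregisters and frees its copy; lookups of the mm hash become RCU walks paired with kref_get_unless_zero(). The install side, condensed from i915_mmu_notifier_find():

        err = mmu_notifier_register(&mn->mn, mm->mm);
        if (err) {
                kfree(mn);
                return ERR_PTR(err);
        }

        old = cmpxchg(&mm->mn, NULL, mn);       /* publish: NULL -> mn exactly once */
        if (old) {
                /* someone else won the race; back out our copy */
                mmu_notifier_unregister(&mn->mn, mm->mm);
                kfree(mn);
                mn = old;
        }
        return mn;
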
@@ -472,7 +469,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
locked = 1;
}
ret = pin_user_pages_remote
- (work->task, mm,
+ (mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
@@ -712,6 +709,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+ .name = "i915_gem_object_userptr",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_NO_MMAP |
@@ -850,7 +848,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->mm_lock);
+ spin_lock_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);
dev_priv->mm.userptr_wq =
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 2b46c6530da9..a768ec61e966 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -88,6 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_ops = {
+ .name = "huge-gem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index c9988b6d5c88..8291ede6902c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -139,6 +139,7 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
+ .name = "huge-gem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = get_huge_pages,
@@ -283,12 +284,14 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops fake_ops = {
+ .name = "fake-gem",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = fake_get_huge_pages,
.put_pages = fake_put_huge_pages,
};
static const struct drm_i915_gem_object_ops fake_ops_single = {
+ .name = "fake-gem",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = fake_get_huge_pages_single,
.put_pages = fake_put_huge_pages,
@@ -1409,147 +1412,6 @@ out:
return err;
}
-static int igt_ppgtt_pin_update(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *dev_priv = ctx->i915;
- unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct drm_i915_gem_object *obj;
- struct i915_gem_engines_iter it;
- struct i915_address_space *vm;
- struct intel_context *ce;
- struct i915_vma *vma;
- unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
- unsigned int n;
- int first, last;
- int err = 0;
-
- /*
- * Make sure there's no funny business when doing a PIN_UPDATE -- in the
- * past we had a subtle issue with being able to incorrectly do multiple
- * alloc va ranges on the same object when doing a PIN_UPDATE, which
- * resulted in some pretty nasty bugs, though only when using
- * huge-gtt-pages.
- */
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- if (!i915_vm_is_4lvl(vm)) {
- pr_info("48b PPGTT not supported, skipping\n");
- goto out_vm;
- }
-
- first = ilog2(I915_GTT_PAGE_SIZE_64K);
- last = ilog2(I915_GTT_PAGE_SIZE_2M);
-
- for_each_set_bit_from(first, &supported, last + 1) {
- unsigned int page_size = BIT(first);
-
- obj = i915_gem_object_create_internal(dev_priv, page_size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_vm;
- }
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, SZ_2M, 0, flags);
- if (err)
- goto out_put;
-
- if (vma->page_sizes.sg < page_size) {
- pr_info("Unable to allocate page-size %x, finishing test early\n",
- page_size);
- goto out_unpin;
- }
-
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_unpin;
-
- if (vma->page_sizes.gtt != page_size) {
- dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);
-
- /*
- * The only valid reason for this to ever fail would be
- * if the dma-mapper screwed us over when we did the
- * dma_map_sg(), since it has the final say over the dma
- * address.
- */
- if (IS_ALIGNED(addr, page_size)) {
- pr_err("page_sizes.gtt=%u, expected=%u\n",
- vma->page_sizes.gtt, page_size);
- err = -EINVAL;
- } else {
- pr_info("dma address misaligned, finishing test early\n");
- }
-
- goto out_unpin;
- }
-
- err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
- if (err)
- goto out_unpin;
-
- i915_vma_unpin(vma);
- i915_gem_object_put(obj);
- }
-
- obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_vm;
- }
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err)
- goto out_put;
-
- /*
- * Make sure we don't end up with something like where the pde is still
- * pointing to the 2M page, and the pt we just filled-in is dangling --
- * we can check this by writing to the first page where it would then
- * land in the now stale 2M page.
- */
-
- n = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- if (!intel_engine_can_store_dword(ce->engine))
- continue;
-
- err = gpu_write(ce, vma, n++, 0xdeadbeaf);
- if (err)
- break;
- }
- i915_gem_context_unlock_engines(ctx);
- if (err)
- goto out_unpin;
-
- while (n--) {
- err = cpu_check(obj, n, 0xdeadbeaf);
- if (err)
- goto out_unpin;
- }
-
-out_unpin:
- i915_vma_unpin(vma);
-out_put:
- i915_gem_object_put(obj);
-out_vm:
- i915_vm_put(vm);
-
- return err;
-}
-
static int igt_tmpfs_fallback(void *arg)
{
struct i915_gem_context *ctx = arg;
@@ -1760,7 +1622,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_shrink_thp),
- SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 8fe3ad2ee34e..299c29e9ad86 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -702,8 +702,5 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- if (!HAS_ENGINE(i915, BCS0))
- return 0;
-
return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index b81978890641..7ffc3c751432 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1229,7 +1229,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
int inst = 0;
int ret = 0;
- if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
+ if (INTEL_GEN(i915) < 9)
return 0;
if (flags & TEST_RESET)
@@ -1255,6 +1255,9 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
if (hweight32(engine->sseu.slice_mask) < 2)
continue;
+ if (!engine->gt->info.sseu.has_slice_pg)
+ continue;
+
/*
* Gen11 VME friendly power-gated configuration with
* half enabled sub-slices.
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index a49016f8ee0d..57c14d3340cd 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -37,20 +37,14 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
return err;
/* 8-Byte aligned */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[0] * sizeof(u32),
- 0)) {
- err = -EIO;
+ err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
+ if (err)
goto unpin_vma;
- }
/* !8-Byte aligned */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[1] * sizeof(u32),
- 1)) {
- err = -EIO;
+ err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
+ if (err)
goto unpin_vma;
- }
/* Skip to the end of the cmd page */
i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
@@ -60,12 +54,9 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
eb->reloc_cache.rq_size += i;
/* Force batch chaining */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[2] * sizeof(u32),
- 2)) {
- err = -EIO;
+ err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
+ if (err)
goto unpin_vma;
- }
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 31549ad83fa6..23b6e11bbc3e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -193,7 +193,7 @@ err_src:
}
struct igt_thread_arg {
- struct drm_i915_private *i915;
+ struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
struct file *file;
struct rnd_state prng;
@@ -203,7 +203,7 @@ struct igt_thread_arg {
static int igt_fill_blt_thread(void *arg)
{
struct igt_thread_arg *thread = arg;
- struct drm_i915_private *i915 = thread->i915;
+ struct intel_engine_cs *engine = thread->engine;
struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
@@ -215,7 +215,7 @@ static int igt_fill_blt_thread(void *arg)
ctx = thread->ctx;
if (!ctx) {
- ctx = live_context(i915, thread->file);
+ ctx = live_context_for_engine(engine, thread->file);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -223,7 +223,7 @@ static int igt_fill_blt_thread(void *arg)
ctx->sched.priority = I915_USER_PRIORITY(prio);
}
- ce = i915_gem_context_get_engine(ctx, BCS0);
+ ce = i915_gem_context_get_engine(ctx, 0);
GEM_BUG_ON(IS_ERR(ce));
/*
@@ -256,7 +256,7 @@ static int igt_fill_blt_thread(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
- obj = huge_gem_object(i915, phys_sz, sz);
+ obj = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -321,7 +321,7 @@ err_flush:
static int igt_copy_blt_thread(void *arg)
{
struct igt_thread_arg *thread = arg;
- struct drm_i915_private *i915 = thread->i915;
+ struct intel_engine_cs *engine = thread->engine;
struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *src, *dst;
struct i915_gem_context *ctx;
@@ -333,7 +333,7 @@ static int igt_copy_blt_thread(void *arg)
ctx = thread->ctx;
if (!ctx) {
- ctx = live_context(i915, thread->file);
+ ctx = live_context_for_engine(engine, thread->file);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -341,7 +341,7 @@ static int igt_copy_blt_thread(void *arg)
ctx->sched.priority = I915_USER_PRIORITY(prio);
}
- ce = i915_gem_context_get_engine(ctx, BCS0);
+ ce = i915_gem_context_get_engine(ctx, 0);
GEM_BUG_ON(IS_ERR(ce));
/*
@@ -374,7 +374,7 @@ static int igt_copy_blt_thread(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
- src = huge_gem_object(i915, phys_sz, sz);
+ src = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(src)) {
err = PTR_ERR(src);
goto err_flush;
@@ -394,7 +394,7 @@ static int igt_copy_blt_thread(void *arg)
if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
src->cache_dirty = true;
- dst = huge_gem_object(i915, phys_sz, sz);
+ dst = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto err_put_src;
@@ -456,7 +456,7 @@ err_flush:
return err;
}
-static int igt_threaded_blt(struct drm_i915_private *i915,
+static int igt_threaded_blt(struct intel_engine_cs *engine,
int (*blt_fn)(void *arg),
unsigned int flags)
#define SINGLE_CTX BIT(0)
@@ -477,14 +477,14 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
if (!thread)
goto out_tsk;
- thread[0].file = mock_file(i915);
+ thread[0].file = mock_file(engine->i915);
if (IS_ERR(thread[0].file)) {
err = PTR_ERR(thread[0].file);
goto out_thread;
}
if (flags & SINGLE_CTX) {
- thread[0].ctx = live_context(i915, thread[0].file);
+ thread[0].ctx = live_context_for_engine(engine, thread[0].file);
if (IS_ERR(thread[0].ctx)) {
err = PTR_ERR(thread[0].ctx);
goto out_file;
@@ -492,7 +492,7 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
}
for (i = 0; i < n_cpus; ++i) {
- thread[i].i915 = i915;
+ thread[i].engine = engine;
thread[i].file = thread[0].file;
thread[i].ctx = thread[0].ctx;
thread[i].n_cpus = n_cpus;
@@ -532,24 +532,40 @@ out_tsk:
return err;
}
+static int test_copy_engines(struct drm_i915_private *i915,
+ int (*fn)(void *arg),
+ unsigned int flags)
+{
+ struct intel_engine_cs *engine;
+ int ret;
+
+ for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_COPY, i915) {
+ ret = igt_threaded_blt(engine, fn, flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int igt_fill_blt(void *arg)
{
- return igt_threaded_blt(arg, igt_fill_blt_thread, 0);
+ return test_copy_engines(arg, igt_fill_blt_thread, 0);
}
static int igt_fill_blt_ctx0(void *arg)
{
- return igt_threaded_blt(arg, igt_fill_blt_thread, SINGLE_CTX);
+ return test_copy_engines(arg, igt_fill_blt_thread, SINGLE_CTX);
}
static int igt_copy_blt(void *arg)
{
- return igt_threaded_blt(arg, igt_copy_blt_thread, 0);
+ return test_copy_engines(arg, igt_copy_blt_thread, 0);
}
static int igt_copy_blt_ctx0(void *arg)
{
- return igt_threaded_blt(arg, igt_copy_blt_thread, SINGLE_CTX);
+ return test_copy_engines(arg, igt_copy_blt_thread, SINGLE_CTX);
}
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
@@ -564,9 +580,6 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- if (!HAS_ENGINE(i915, BCS0))
- return 0;
-
return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index e7e3c620f542..51b5a3421b40 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ mutex_init(&ctx->mutex);
+
spin_lock_init(&ctx->stale.lock);
INIT_LIST_HEAD(&ctx->stale.engines);
@@ -35,7 +37,7 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- mutex_init(&ctx->mutex);
+ mutex_init(&ctx->lut_mutex);
if (name) {
struct i915_ppgtt *ppgtt;
@@ -100,6 +102,43 @@ err_ctx:
}
struct i915_gem_context *
+live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
+{
+ struct i915_gem_engines *engines;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+
+ engines = alloc_engines(1);
+ if (!engines)
+ return ERR_PTR(-ENOMEM);
+
+ ctx = live_context(engine->i915, file);
+ if (IS_ERR(ctx)) {
+ __free_engines(engines, 0);
+ return ctx;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ __free_engines(engines, 0);
+ return ERR_CAST(ce);
+ }
+
+ intel_context_set_gem(ce, ctx);
+ engines->engines[0] = ce;
+ engines->num_engines = 1;
+
+ mutex_lock(&ctx->engines_mutex);
+ i915_gem_context_set_user_engines(ctx);
+ engines = rcu_replace_pointer(ctx->engines, engines, 1);
+ mutex_unlock(&ctx->engines_mutex);
+
+ engines_idle_release(ctx, engines);
+
+ return ctx;
+}
+
+struct i915_gem_context *
kernel_context(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index fb83d2f09212..2a6121d33352 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -9,6 +9,7 @@
struct file;
struct drm_i915_private;
+struct intel_engine_cs;
void mock_init_contexts(struct drm_i915_private *i915);
@@ -21,6 +22,9 @@ void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct file *file);
+struct i915_gem_context *
+live_context_for_engine(struct intel_engine_cs *engine, struct file *file);
+
struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
index 1de5fbaa1cf9..3a21cf63b3f0 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -9,6 +9,7 @@
#include "debugfs_engines.h"
#include "debugfs_gt.h"
#include "debugfs_gt_pm.h"
+#include "intel_sseu_debugfs.h"
#include "uc/intel_uc_debugfs.h"
#include "i915_drv.h"
@@ -25,6 +26,7 @@ void debugfs_gt_register(struct intel_gt *gt)
debugfs_engines_register(gt, root);
debugfs_gt_pm_register(gt, root);
+ intel_sseu_debugfs_register(gt, root);
intel_uc_debugfs_register(&gt->uc, root);
}
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
new file mode 100644
index 000000000000..b491a64919c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gen2_engine_cs.h"
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_ring.h"
+
+int gen2_emit_flush(struct i915_request *rq, u32 mode)
+{
+ unsigned int num_store_dw = 12;
+ u32 cmd, *cs;
+
+ cmd = MI_FLUSH;
+ if (mode & EMIT_INVALIDATE)
+ cmd |= MI_READ_FLUSH;
+
+ cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = cmd;
+ while (num_store_dw--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ }
+ *cs++ = cmd;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd, *cs;
+ int i;
+
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH;
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_EXE_FLUSH;
+ if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
+ cmd |= MI_INVALIDATE_ISP;
+ }
+
+ i = 2;
+ if (mode & EMIT_INVALIDATE)
+ i += 20;
+
+ cs = intel_ring_begin(rq, i);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = cmd;
+
+ /*
+ * A random delay to let the CS invalidate take effect? Without this
+ * delay, the GPU relocation path fails as the CS does not see
+ * the updated contents. Just as important, if we apply the flushes
+ * to the EMIT_FLUSH branch (i.e. immediately after the relocation
+ * write and before the invalidate on the next batch), the relocations
+ * still fail. This implies that there is a delay following invalidation
+ * that is required to reset the caches, as opposed to a delay to
+ * ensure the memory is written.
+ */
+ if (mode & EMIT_INVALIDATE) {
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ for (i = 0; i < 12; i++)
+ *cs++ = MI_FLUSH;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
+ *cs++ = cmd;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
+ int flush, int post)
+{
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH;
+
+ while (flush--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+ *cs++ = rq->fence.seqno;
+ }
+
+ while (post--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+ }
+
+ *cs++ = MI_USER_INTERRUPT;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return __gen2_emit_breadcrumb(rq, cs, 16, 8);
+}
+
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return __gen2_emit_breadcrumb(rq, cs, 8, 8);
+}
+
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT SZ_256K
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES * SZ_4K, I830_BATCH_LIMIT)
+int i830_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 *cs, cs_offset =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
+
+ GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Evict the invalid PTE TLBs */
+ *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+ *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+ *cs++ = cs_offset;
+ *cs++ = 0xdeadbeef;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ cs = intel_ring_begin(rq, 6 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Blit the batch (which now has all relocs applied) to the
+ * stable batch scratch bo area (so that the CS never
+ * stumbles over its tlb invalidation bug) ...
+ */
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+ *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+ *cs++ = cs_offset;
+ *cs++ = 4096;
+ *cs++ = offset;
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* ... and execute it. */
+ offset = cs_offset;
+ }
+
+ if (!(dispatch_flags & I915_DISPATCH_SECURE))
+ offset |= MI_BATCH_NON_SECURE;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen3_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 *cs;
+
+ if (!(dispatch_flags & I915_DISPATCH_SECURE))
+ offset |= MI_BATCH_NON_SECURE;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen4_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_NON_SECURE_I965;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | security;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+void gen2_irq_enable(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
+ ENGINE_POSTING_READ16(engine, RING_IMR);
+}
+
+void gen2_irq_disable(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
+}
+
+void gen3_irq_enable(struct intel_engine_cs *engine)
+{
+ engine->i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
+}
+
+void gen3_irq_disable(struct intel_engine_cs *engine)
+{
+ engine->i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+}
+
+void gen5_irq_enable(struct intel_engine_cs *engine)
+{
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void gen5_irq_disable(struct intel_engine_cs *engine)
+{
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
new file mode 100644
index 000000000000..a5cd64a65c9e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN2_ENGINE_CS_H__
+#define __GEN2_ENGINE_CS_H__
+
+#include <linux/types.h>
+
+struct i915_request;
+struct intel_engine_cs;
+
+int gen2_emit_flush(struct i915_request *rq, u32 mode);
+int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
+
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs);
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs);
+
+int i830_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int gen3_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int gen4_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+
+void gen2_irq_enable(struct intel_engine_cs *engine);
+void gen2_irq_disable(struct intel_engine_cs *engine);
+void gen3_irq_enable(struct intel_engine_cs *engine);
+void gen3_irq_disable(struct intel_engine_cs *engine);
+void gen5_irq_enable(struct intel_engine_cs *engine);
+void gen5_irq_disable(struct intel_engine_cs *engine);
+
+#endif /* __GEN2_ENGINE_CS_H__ */
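As a quick orientation for how the helpers declared above are meant to be consumed, the sketch below wires them into an engine's command-streamer vfuncs. This is illustrative only: the setup function name is hypothetical, the real assignments belong to the legacy ring submission backend, and only the intel_engine_cs callback fields (emit_flush, emit_bb_start, emit_fini_breadcrumb, irq_enable/irq_disable) are assumed from the surrounding code.

static void setup_ring_vfuncs_gen3(struct intel_engine_cs *engine)
{
	/* Command emission helpers split out above (hypothetical wiring) */
	engine->emit_flush = gen2_emit_flush;
	engine->emit_bb_start = gen3_emit_bb_start;
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;

	/* Legacy ring interrupt masking */
	engine->irq_enable = gen3_irq_enable;
	engine->irq_disable = gen3_irq_disable;
}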
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
new file mode 100644
index 000000000000..ce38d1bcaba3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gen6_engine_cs.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_ring.h"
+
+#define HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
+
+/*
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+static int
+gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0; /* low dword */
+ *cs++ = 0; /* high dword */
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs, flags = 0;
+ int ret;
+
+ /* Force SNB workarounds for PIPE_CONTROL flushes */
+ ret = gen6_emit_post_sync_nonzero_flush(rq);
+ if (ret)
+ return ret;
+
+ /*
+ * Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact. And when rearranging requests, the order of flushes is
+ * unknown.
+ */
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+
+ /* Finally we can flush and with it emit the breadcrumb */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+static int mi_flush_dw(struct i915_request *rq, u32 flags)
+{
+ u32 cmd, *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ cmd |= flags;
+
+ *cs++ = cmd;
+ *cs++ = HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
+{
+ return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
+}
+
+int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
+}
+
+int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode)
+{
+ return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
+}
+
+int gen6_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_NON_SECURE_I965;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = __gen6_emit_bb_start(cs, offset, security);
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int
+hsw_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = __gen6_emit_bb_start(cs, offset, security);
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen7_stall_cs(struct i915_request *rq)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs, flags = 0;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /*
+ * CS_STALL suggests at least a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ /*
+ * Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
+
+ /*
+ * Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set.
+ */
+ gen7_stall_cs(rq);
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+#define GEN7_XCS_WA 32
+u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ int i;
+
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->fence.seqno;
+
+ for (i = 0; i < GEN7_XCS_WA; i++) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+ }
+
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+#undef GEN7_XCS_WA
+
+void gen6_irq_enable(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ ENGINE_POSTING_READ(engine, RING_IMR);
+
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void gen6_irq_disable(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void hsw_irq_enable_vecs(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ ENGINE_POSTING_READ(engine, RING_IMR);
+
+ gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void hsw_irq_disable_vecs(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~0);
+ gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.h b/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
new file mode 100644
index 000000000000..76c6bc9f3bde
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN6_ENGINE_CS_H__
+#define __GEN6_ENGINE_CS_H__
+
+#include <linux/types.h>
+
+#include "intel_gpu_commands.h"
+
+struct i915_request;
+struct intel_engine_cs;
+
+int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode);
+int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode);
+u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode);
+u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+int gen6_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int hsw_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+
+void gen6_irq_enable(struct intel_engine_cs *engine);
+void gen6_irq_disable(struct intel_engine_cs *engine);
+
+void hsw_irq_enable_vecs(struct intel_engine_cs *engine);
+void hsw_irq_disable_vecs(struct intel_engine_cs *engine);
+
+#endif /* __GEN6_ENGINE_CS_H__ */
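Similarly, the gen6+ helpers pair up per engine class; for example a Haswell VECS would take the xcs flush/breadcrumb paths together with the PM-interrupt based IRQ hooks. Again a hedged sketch with a hypothetical setup function, not the series' actual backend code.

static void setup_ring_vfuncs_hsw_vecs(struct intel_engine_cs *engine)
{
	engine->emit_flush = gen6_emit_flush_xcs;
	engine->emit_bb_start = hsw_emit_bb_start;
	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;

	/* VECS interrupts are routed via the GT PM interrupt registers */
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;
}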
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index f4fec7eb4064..cdc0b9c54305 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -183,13 +183,11 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt, *alloc = NULL;
- intel_wakeref_t wakeref;
+ bool flush = false;
u64 from = start;
unsigned int pde;
int ret = 0;
- wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
spin_lock(&pd->lock);
gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
@@ -214,14 +212,20 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
alloc = pt;
pt = pd->entry[pde];
}
+
+ flush = true;
}
atomic_add(count, &pt->used);
}
spin_unlock(&pd->lock);
- if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
- gen6_flush_pd(ppgtt, from, start);
+ if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
+ gen6_flush_pd(ppgtt, from, start);
+ }
goto out;
@@ -230,7 +234,6 @@ unwind_out:
out:
if (alloc)
free_px(vm, alloc);
- intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return ret;
}
@@ -299,11 +302,12 @@ static void pd_vma_clear_pages(struct i915_vma *vma)
vma->pages = NULL;
}
-static int pd_vma_bind(struct i915_vma *vma,
+static int pd_vma_bind(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct gen6_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
@@ -314,7 +318,7 @@ static int pd_vma_bind(struct i915_vma *vma,
return 0;
}
-static void pd_vma_unbind(struct i915_vma *vma)
+static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
struct gen6_ppgtt *ppgtt = vma->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index de595b66a746..d93d85cd3027 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -396,7 +396,7 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
i915_gem_object_flush_map(vma->obj);
- i915_gem_object_unpin_map(vma->obj);
+ __i915_gem_object_release_map(vma->obj);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d907d538176e..91786310c114 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -314,13 +314,18 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
lockdep_assert_held(&rq->lock);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return true;
+
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
struct intel_context *ce = rq->context;
struct list_head *pos;
spin_lock(&b->irq_lock);
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
+ if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ goto unlock;
if (!__intel_breadcrumbs_arm_irq(b))
goto unlock;
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index 487299cb91f2..b9c8163978a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -30,7 +30,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+ *cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);
intel_ring_advance(rq, cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 9bf6d4989968..a9249a23903a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -187,7 +187,6 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
-#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
@@ -335,7 +334,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
+ ktime_t *now);
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 8691eb61e185..dd1a42c4d344 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -370,7 +370,7 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
* instances.
*/
if ((INTEL_GEN(i915) >= 11 &&
- RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
+ engine->gt->info.vdbox_sfc_access & engine->mask) ||
(INTEL_GEN(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
@@ -414,12 +414,12 @@ void intel_engines_release(struct intel_gt *gt)
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
- intel_wakeref_wait_for_idle(&engine->wakeref);
- GEM_BUG_ON(intel_engine_pm_is_awake(engine));
-
if (!engine->release)
continue;
+ intel_wakeref_wait_for_idle(&engine->wakeref);
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+
engine->release(engine);
engine->release = NULL;
@@ -450,6 +450,80 @@ void intel_engines_free(struct intel_gt *gt)
}
}
+/*
+ * Determine which engines are fused off in our particular hardware.
+ * Note that we have a catch-22 situation where we need to be able to access
+ * the blitter forcewake domain to read the engine fuses, but at the same time
+ * we need to know which engines are available on the system to know which
+ * forcewake domains are present. We solve this by initializing the forcewake
+ * domains based on the full engine mask in the platform capabilities before
+ * calling this function and pruning the domains for fused-off engines
+ * afterwards.
+ */
+static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_gt_info *info = &gt->info;
+ struct intel_uncore *uncore = gt->uncore;
+ unsigned int logical_vdbox = 0;
+ unsigned int i;
+ u32 media_fuse;
+ u16 vdbox_mask;
+ u16 vebox_mask;
+
+ info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
+
+ if (INTEL_GEN(i915) < 11)
+ return info->engine_mask;
+
+ media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(gt, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vdbox_mask)) {
+ info->engine_mask &= ~BIT(_VCS(i));
+ drm_dbg(&i915->drm, "vcs%u fused off\n", i);
+ continue;
+ }
+
+ /*
+ * In Gen11, only even-numbered logical VDBOXes are
+ * hooked up to an SFC (Scaler & Format Converter) unit.
+ * In TGL each VDBOX has access to an SFC.
+ */
+ if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
+ gt->info.vdbox_sfc_access |= BIT(i);
+ }
+ drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(gt));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
+
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(gt, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vebox_mask)) {
+ info->engine_mask &= ~BIT(_VECS(i));
+ drm_dbg(&i915->drm, "vecs%u fused off\n", i);
+ }
+ }
+ drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(gt));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+
+ return info->engine_mask;
+}
+
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @gt: pointer to struct intel_gt
@@ -459,8 +533,7 @@ void intel_engines_free(struct intel_gt *gt)
int intel_engines_init_mmio(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- struct intel_device_info *device_info = mkwrite_device_info(i915);
- const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
+ const unsigned int engine_mask = init_engine_mask(gt);
unsigned int mask = 0;
unsigned int i;
int err;
@@ -473,7 +546,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
- if (!HAS_ENGINE(i915, i))
+ if (!HAS_ENGINE(gt, i))
continue;
err = intel_engine_setup(gt, i);
@@ -489,14 +562,16 @@ int intel_engines_init_mmio(struct intel_gt *gt)
* engines.
*/
if (drm_WARN_ON(&i915->drm, mask != engine_mask))
- device_info->engine_mask = mask;
+ gt->info.engine_mask = mask;
- RUNTIME_INFO(i915)->num_engines = hweight32(mask);
+ gt->info.num_engines = hweight32(mask);
intel_gt_check_and_clear_faults(gt);
intel_setup_engine_capabilities(gt);
+ intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
+
return 0;
cleanup:
@@ -634,7 +709,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
/* Use the whole device by default */
engine->sseu =
- intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
+ intel_sseu_from_device_info(&engine->gt->info.sseu);
intel_engine_init_workarounds(engine);
intel_engine_init_whitelist(engine);
@@ -661,7 +736,6 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
if (!frame)
return -ENOMEM;
- frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
@@ -1001,7 +1075,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@@ -1095,19 +1169,21 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
- if (__tasklet_is_scheduled(t)) {
- local_bh_disable();
- if (tasklet_trylock(t)) {
- /* Must wait for any GPU reset in progress. */
- if (__tasklet_is_enabled(t))
- t->func(t->data);
- tasklet_unlock(t);
- }
- local_bh_enable();
- }
+ if (!t->func)
+ return;
- /* Otherwise flush the tasklet if it was running on another cpu */
- tasklet_unlock_wait(t);
+ /* Synchronise and wait for the tasklet on another CPU */
+ tasklet_kill(t);
+
+ /* Having cancelled the tasklet, ensure that it is run */
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
}
/**
@@ -1194,8 +1270,7 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-static int print_sched_attr(struct drm_i915_private *i915,
- const struct i915_sched_attr *attr,
+static int print_sched_attr(const struct i915_sched_attr *attr,
char *buf, int x, int len)
{
if (attr->priority == I915_PRIORITY_INVALID)
@@ -1215,7 +1290,7 @@ static void print_request(struct drm_printer *m,
char buf[80] = "";
int x = 0;
- x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
+ x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
@@ -1423,9 +1498,11 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ccid:%08x, ",
+ "\t\tActive[%d]: ccid:%08x%s%s, ",
(int)(port - execlists->active),
- rq->context->lrc.ccid);
+ rq->context->lrc.ccid,
+ intel_context_is_closed(rq->context) ? "!" : "",
+ intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
@@ -1435,9 +1512,11 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
- "\t\tPending[%d]: ccid:%08x, ",
+ "\t\tPending[%d]: ccid:%08x%s%s, ",
(int)(port - execlists->pending),
- rq->context->lrc.ccid);
+ rq->context->lrc.ccid,
+ intel_context_is_closed(rq->context) ? "!" : "",
+ intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
@@ -1506,6 +1585,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_request *rq;
intel_wakeref_t wakeref;
unsigned long flags;
+ ktime_t dummy;
if (header) {
va_list ap;
@@ -1523,6 +1603,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
yesno(!llist_empty(&engine->barrier_tasks)));
drm_printf(m, "\tLatency: %luus\n",
ewma__engine_latency_read(&engine->latency));
+ if (intel_engine_supports_stats(engine))
+ drm_printf(m, "\tRuntime: %llums\n",
+ ktime_to_ms(intel_engine_get_busy_time(engine,
+ &dummy)));
+ drm_printf(m, "\tForcewake: %x domains, %d active\n",
+ engine->fw_domain, atomic_read(&engine->fw_active));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@@ -1589,7 +1675,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
+static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
+ ktime_t *now)
{
ktime_t total = engine->stats.total;
@@ -1597,9 +1684,9 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
* If the engine is executing something at the moment
* add it to the total.
*/
+ *now = ktime_get();
if (atomic_read(&engine->stats.active))
- total = ktime_add(total,
- ktime_sub(ktime_get(), engine->stats.start));
+ total = ktime_add(total, ktime_sub(*now, engine->stats.start));
return total;
}
@@ -1607,17 +1694,18 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
/**
* intel_engine_get_busy_time() - Return current accumulated engine busyness
* @engine: engine to report on
+ * @now: monotonic timestamp of sampling
*
* Returns accumulated time @engine was busy since engine stats were enabled.
*/
-ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
unsigned int seq;
ktime_t total;
do {
seq = read_seqbegin(&engine->stats.lock);
- total = __intel_engine_get_busy_time(engine);
+ total = __intel_engine_get_busy_time(engine, now);
} while (read_seqretry(&engine->stats.lock, seq));
return total;
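To make the purpose of the new @now out-parameter concrete, a sampling caller (the PMU being the obvious candidate) can pair two consecutive readings into a utilisation figure; because the busy total and the timestamp are taken together under the stats seqlock, the ratio is self-consistent. The helper below and its bookkeeping arguments are purely illustrative.

static u64 engine_busy_pct(struct intel_engine_cs *engine,
			   ktime_t *prev_busy, ktime_t *prev_ts)
{
	ktime_t now;
	ktime_t busy = intel_engine_get_busy_time(engine, &now);
	u64 busy_ns = ktime_to_ns(ktime_sub(busy, *prev_busy));
	u64 wall_ns = ktime_to_ns(ktime_sub(now, *prev_ts));

	*prev_busy = busy;
	*prev_ts = now;

	/* busy and now were sampled together, so the ratio is stable */
	return wall_ns ? div64_u64(100 * busy_ns, wall_ns) : 0;
}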
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 5136c8bf112d..8ffdf676c0a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -4,6 +4,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_request.h"
#include "intel_context.h"
@@ -31,7 +32,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
delay = msecs_to_jiffies_timeout(delay);
if (delay >= HZ)
delay = round_jiffies_up_relative(delay);
- mod_delayed_work(system_wq, &engine->heartbeat.work, delay);
+ mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay);
return true;
}
@@ -48,8 +49,10 @@ static void show_heartbeat(const struct i915_request *rq,
struct drm_printer p = drm_debug_printer("heartbeat");
intel_engine_dump(engine, &p,
- "%s heartbeat {prio:%d} not ticking\n",
+ "%s heartbeat {seqno:%llx:%lld, prio:%d} not ticking\n",
engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
rq->sched.attr.priority);
}
@@ -62,6 +65,10 @@ static void heartbeat(struct work_struct *wrk)
container_of(wrk, typeof(*engine), heartbeat.work.work);
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
+ unsigned long serial;
+
+ /* Just in case everything has gone horribly wrong, give it a kick */
+ intel_engine_flush_submission(engine);
rq = engine->heartbeat.systole;
if (rq && i915_request_completed(rq)) {
@@ -76,8 +83,19 @@ static void heartbeat(struct work_struct *wrk)
goto out;
if (engine->heartbeat.systole) {
- if (engine->schedule &&
- rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
+ if (!i915_sw_fence_signaled(&rq->submit)) {
+ /*
+ * Not yet submitted, system is stalled.
+ *
+ * This more often happens for ring submission,
+ * where all contexts are funnelled into a common
+ * ringbuffer. If one context is blocked on an
+ * external fence, not only is it not submitted,
+ * but all other contexts, including the kernel
+ * context, are stuck waiting for the signal.
+ */
+ } else if (engine->schedule &&
+ rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
/*
* Gradually raise the priority of the heartbeat to
* give high priority work [which presumably desires
@@ -105,10 +123,19 @@ static void heartbeat(struct work_struct *wrk)
goto out;
}
- if (engine->wakeref_serial == engine->serial)
+ serial = READ_ONCE(engine->serial);
+ if (engine->wakeref_serial == serial)
goto out;
- mutex_lock(&ce->timeline->mutex);
+ if (!mutex_trylock(&ce->timeline->mutex)) {
+ /* Unable to lock the kernel timeline, is the engine stuck? */
+ if (xchg(&engine->heartbeat.blocked, serial) == serial)
+ intel_gt_handle_error(engine->gt, engine->mask,
+ I915_ERROR_CAPTURE,
+ "no heartbeat on %s",
+ engine->name);
+ goto out;
+ }
intel_context_enter(ce);
rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
@@ -117,7 +144,7 @@ static void heartbeat(struct work_struct *wrk)
goto unlock;
idle_pulse(engine, rq);
- if (i915_modparams.enable_hangcheck)
+ if (engine->i915->params.enable_hangcheck)
engine->heartbeat.systole = i915_request_get(rq);
__i915_request_commit(rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index d0a1078ef632..8ec3eecf3e39 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -142,6 +142,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
return true;
GEM_BUG_ON(!intel_context_is_barrier(ce));
+ GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
/* Already inside the kernel context, safe to power down. */
if (engine->wakeref_serial == engine->serial)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 2b6cdf47d428..8de92fd7d392 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -24,6 +24,7 @@
#include "i915_selftest.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
+#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
@@ -176,8 +177,12 @@ struct intel_engine_execlists {
* the first error interrupt, record the EIR and schedule the tasklet.
* In the tasklet, we process the pending CS events to ensure we have
* the guilty request, and then reset the engine.
+ *
+ * Low 16b are used by HW, with the upper 16b used as the enabling mask.
+ * Reserve the upper 16b for tracking internal errors.
*/
u32 error_interrupt;
+#define ERROR_CSB BIT(31)
/**
* @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
@@ -313,6 +318,16 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
+ /*
+ * Some w/a require forcewake to be held (which prevents RC6) while
+ * a particular engine is active. If so, we set fw_domain to which
+ * domains need to be held for the duration of request activity,
+ * and 0 if none. We try to limit the duration of the hold as much
+ * as possible.
+ */
+ enum forcewake_domains fw_domain;
+ atomic_t fw_active;
+
unsigned long context_tag;
struct rb_node uabi_node;
@@ -337,6 +352,7 @@ struct intel_engine_cs {
struct {
struct delayed_work work;
struct i915_request *systole;
+ unsigned long blocked;
} heartbeat;
unsigned long serial;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 848decee9066..34e6096f196e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -201,7 +201,7 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
uabi_node);
char old[sizeof(engine->name)];
- if (intel_gt_has_init_error(engine->gt))
+ if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 66165b10256e..62979ea591f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -108,13 +108,32 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
- struct i915_vma *vma;
+ struct i915_vma *vma, *vn;
+ int open;
+
+ mutex_lock(&ggtt->vm.mutex);
- list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
+
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
+ if (i915_vma_is_pinned(vma))
+ continue;
+
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ __i915_vma_evict(vma);
+ drm_mm_remove_node(&vma->node);
+ }
+ }
+
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
ggtt->invalidate(ggtt);
+ atomic_set(&ggtt->vm.open, open);
+
+ mutex_unlock(&ggtt->vm.mutex);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
@@ -417,35 +436,31 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
-static int ggtt_bind_vma(struct i915_vma *vma,
+static int ggtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
+ if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
+ return 0;
+
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
-
return 0;
}
-static void ggtt_unbind_vma(struct i915_vma *vma)
+static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma->node.start, vma->size);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -553,7 +568,8 @@ err:
return ret;
}
-static int aliasing_gtt_bind_vma(struct i915_vma *vma,
+static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -566,44 +582,27 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
+ struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;
- if (flags & I915_VMA_ALLOC) {
- ret = alias->vm.allocate_va_range(&alias->vm,
- vma->node.start,
- vma->size);
- if (ret)
- return ret;
-
- set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
- }
-
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
- __i915_vma_flags(vma)));
- alias->vm.insert_entries(&alias->vm, vma,
- cache_level, pte_flags);
+ ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
+ if (ret)
+ return ret;
}
if (flags & I915_VMA_GLOBAL_BIND)
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
return 0;
}
-static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
+static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
- struct i915_address_space *vm = vma->vm;
-
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
vm->clear_range(vm, vma->node.start, vma->size);
- }
-
- if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- struct i915_address_space *vm =
- &i915_vm_to_ggtt(vma->vm)->alias->vm;
- vm->clear_range(vm, vma->node.start, vma->size);
- }
+ if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
+ ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
@@ -1166,6 +1165,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
+static unsigned int clear_bind(struct i915_vma *vma)
+{
+ return atomic_fetch_and(~I915_VMA_BIND_MASK, &vma->flags);
+}
+
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
struct i915_vma *vma;
@@ -1183,14 +1187,11 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
+ unsigned int was_bound = clear_bind(vma);
- if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
- continue;
-
- clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
- PIN_GLOBAL, NULL));
+ was_bound, NULL));
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f069551e412f..e0755f1a904b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -44,6 +44,14 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
gt->ggtt = ggtt;
}
+int intel_gt_init_mmio(struct intel_gt *gt)
+{
+ intel_uc_init_mmio(&gt->uc);
+ intel_sseu_info_init(gt);
+
+ return intel_engines_init_mmio(gt);
+}
+
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
struct intel_uncore *uncore = gt->uncore;
@@ -510,7 +518,7 @@ static int __engines_verify_workarounds(struct intel_gt *gt)
static void __intel_gt_disable(struct intel_gt *gt)
{
- intel_gt_set_wedged_on_init(gt);
+ intel_gt_set_wedged_on_fini(gt);
intel_gt_suspend_prepare(gt);
intel_gt_suspend_late(gt);
@@ -616,6 +624,11 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
void intel_gt_driver_release(struct intel_gt *gt)
{
struct i915_address_space *vm;
+ intel_wakeref_t wakeref;
+
+ /* Scrub all HW state upon release */
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ __intel_gt_reset(gt, ALL_ENGINES);
vm = fetch_and_zero(&gt->vm);
if (vm) /* FIXME being called twice on error paths :( */
@@ -637,3 +650,11 @@ void intel_gt_driver_late_release(struct intel_gt *gt)
intel_gt_fini_timelines(gt);
intel_engines_free(gt);
}
+
+void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p)
+{
+ drm_printf(p, "available engines: %x\n", info->engine_mask);
+
+ intel_sseu_dump(&info->sseu, p);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 4fac043750aa..9157c7411f60 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -11,6 +11,7 @@
#include "intel_reset.h"
struct drm_i915_private;
+struct drm_printer;
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
@@ -35,6 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);
@@ -58,14 +60,21 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
return i915_ggtt_offset(gt->scratch) + field;
}
-static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
+static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
- return __intel_reset_failed(&gt->reset);
+ return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
+ test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}
-static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
- return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+ GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
+ !test_bit(I915_WEDGED, &gt->reset.flags));
+
+ return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}
+void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p);
+
#endif /* __INTEL_GT_H__ */
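The reworked helpers above encode an invariant: an unrecoverable error (wedged on init or on fini) must always be accompanied by the generic wedged bit, which is what the GEM_BUG_ON asserts. A small user-space sketch of that relationship, with made-up bit values:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define WEDGED		(1ul << 31)
#define WEDGED_ON_INIT	(1ul << 30)
#define WEDGED_ON_FINI	(1ul << 29)

static bool has_unrecoverable_error(unsigned long flags)
{
	return flags & (WEDGED_ON_INIT | WEDGED_ON_FINI);
}

static bool is_wedged(unsigned long flags)
{
	/* Unrecoverable must imply wedged; anything else is a driver bug. */
	assert(!has_unrecoverable_error(flags) || (flags & WEDGED));
	return flags & WEDGED;
}

int main(void)
{
	unsigned long flags = WEDGED | WEDGED_ON_FINI;

	printf("wedged=%d unrecoverable=%d\n",
	       is_wedged(flags), has_unrecoverable_error(flags));
	return 0;
}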
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 1495054a4305..418ae184cecf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -212,8 +212,9 @@ void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
- if (cancel_delayed_work_sync(&pool->work))
+ do {
pool_free_imm(pool);
+ } while (cancel_delayed_work_sync(&pool->work));
}
void intel_gt_fini_buffer_pool(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 0cc7dd54f4f9..b05da68e52f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -27,7 +27,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
u32 eir;
- eir = ENGINE_READ(engine, RING_EIR);
+ /* Upper 16b are the enabling mask, rsvd for internal errors */
+ eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
ENGINE_TRACE(engine, "CS error: %x\n", eir);
/* Disable the error interrupt until after the reset */
@@ -457,7 +458,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_ENGINE(gt->i915, VECS0)) {
+ if (HAS_ENGINE(gt, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
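In the RING_EIR masking above, GENMASK(15, 0) selects the low 16 bits so only the raw error status is reported and the enabling mask in the upper half is ignored. A standalone sketch of the same mask arithmetic; the macro below is a simplified stand-in, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Simplified 32-bit stand-in for the kernel's GENMASK(). */
#define GENMASK_U32(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (31 - (h))))

int main(void)
{
	uint32_t eir = 0xabcd1234;	/* example RING_EIR value */
	uint32_t user_errors = eir & GENMASK_U32(15, 0);

	printf("mask %#x, reported errors %#x\n",
	       (unsigned int)GENMASK_U32(15, 0), (unsigned int)user_errors);
	return 0;
}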
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 6bdb434a442d..274aa0dd7050 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -188,7 +188,7 @@ int intel_gt_resume(struct intel_gt *gt)
enum intel_engine_id id;
int err;
- err = intel_gt_has_init_error(gt);
+ err = intel_gt_has_unrecoverable_error(gt);
if (err)
return err;
@@ -214,8 +214,8 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
- drm_err(&gt->i915->drm,
- "Failed to initialize GPU, declaring it wedged!\n");
+ i915_probe_error(gt->i915,
+ "Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 16ff47c83bd5..66fcbf9d0fdd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -31,12 +31,15 @@ static bool engine_active(const struct intel_engine_cs *engine)
return !list_empty(&engine->kernel_context->timeline->requests);
}
-static bool flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt, long timeout)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool active = false;
+ if (!timeout)
+ return false;
+
if (!intel_gt_pm_is_awake(gt))
return false;
@@ -139,7 +142,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
if (unlikely(timeout < 0))
timeout = -timeout, interruptible = false;
- flush_submission(gt); /* kick the ksoftirqd tasklets */
+ flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex)) {
@@ -194,7 +197,7 @@ out_active: spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
- if (flush_submission(gt)) /* Wait, there's more! */
+ if (flush_submission(gt, timeout)) /* Wait, there's more! */
active_count++;
return active_count ? timeout : 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 0cc1d6b185dc..6d39a4a11bf3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -109,6 +109,17 @@ struct intel_gt {
struct intel_gt_buffer_pool buffer_pool;
struct i915_vma *scratch;
+
+ struct intel_gt_info {
+ intel_engine_mask_t engine_mask;
+ u8 num_engines;
+
+ /* Media engine access to SFC per instance */
+ u8 vdbox_sfc_access;
+
+ /* Slice/subslice/EU info */
+ struct sseu_dev_info sseu;
+ } info;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index d93ebdf3fa0e..f2b75078e05f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -198,14 +198,16 @@ struct intel_gt;
struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_vma *vma,
+ int (*bind_vma)(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
/*
* Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page.
*/
- void (*unbind_vma)(struct i915_vma *vma);
+ void (*unbind_vma)(struct i915_address_space *vm,
+ struct i915_vma *vma);
int (*set_pages)(struct i915_vma *vma);
void (*clear_pages)(struct i915_vma *vma);
@@ -566,6 +568,13 @@ int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);
+int ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma);
+
void gtt_write_workarounds(struct intel_gt *gt);
void setup_private_pat(struct intel_uncore *uncore);
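The i915_vma_ops change above passes the target address space into bind_vma/unbind_vma instead of deriving it from vma->vm, which is what lets the aliasing-GGTT path reuse ppgtt_bind_vma() against its alias vm. A user-space sketch of that ops-table shape; every type and name here is invented for illustration:

#include <stdio.h>

struct vm;
struct vma { struct vm *vm; const char *name; };

struct vm_ops {
	int (*bind_vma)(struct vm *vm, struct vma *vma);
	void (*unbind_vma)(struct vm *vm, struct vma *vma);
};

struct vm { const char *name; struct vm_ops ops; };

static int bind_vma(struct vm *vm, struct vma *vma)
{
	printf("bind %s into %s\n", vma->name, vm->name);
	return 0;
}

static void unbind_vma(struct vm *vm, struct vma *vma)
{
	printf("unbind %s from %s\n", vma->name, vm->name);
}

int main(void)
{
	struct vm ggtt = { "ggtt", { bind_vma, unbind_vma } };
	struct vm alias = { "aliasing-ppgtt", { bind_vma, unbind_vma } };
	struct vma v = { &ggtt, "obj0" };

	/* Same callback, but aimed at a vm other than vma->vm. */
	ggtt.ops.bind_vma(&ggtt, &v);
	alias.ops.bind_vma(&alias, &v);
	alias.ops.unbind_vma(&alias, &v);
	return 0;
}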
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index cb07e1d2a353..24322ef08aa4 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -446,6 +446,9 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
* we have to flip the index value to become priority.
*/
p = to_priolist(rb);
+ if (!I915_USER_PRIORITY_SHIFT)
+ return p->priority;
+
return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
}
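The queue_prio() guard above exists because the effective priority is packed as ((priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(used); with a shift of zero there is no sub-priority band to subtract from, so the raw priority is returned instead. A small sketch of that packing, with an illustrative shift value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define USER_PRIORITY_SHIFT 2	/* illustrative; may be configured to 0 */

static int effective_prio(int priority, unsigned int used)
{
	if (!USER_PRIORITY_SHIFT)
		return priority;

	return ((priority + 1) << USER_PRIORITY_SHIFT) - ffs(used);
}

int main(void)
{
	/* used = 0b100: the third sub-level of the band is occupied. */
	printf("effective priority: %d\n", effective_prio(1, 0x4));
	return 0;
}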
@@ -1103,7 +1106,7 @@ static struct i915_request *
__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
- struct list_head *uninitialized_var(pl);
+ struct list_head *pl;
int prio = I915_PRIORITY_INVALID;
lockdep_assert_held(&engine->active.lock);
@@ -1377,6 +1380,8 @@ __execlists_schedule_in(struct i915_request *rq)
ce->lrc.ccid |= engine->execlists.ccid;
__intel_gt_pm_get(engine->gt);
+ if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
+ intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -1409,8 +1414,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
struct i915_request *next = READ_ONCE(ve->request);
- if (next && next->execution_mask & ~rq->execution_mask)
- tasklet_schedule(&ve->base.execlists.tasklet);
+ if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
}
static inline void
@@ -1445,6 +1450,8 @@ __execlists_schedule_out(struct i915_request *rq,
intel_context_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
+ intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
intel_gt_pm_put_async(engine->gt);
/*
@@ -1636,9 +1643,9 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
ccid = ce->lrc.ccid;
/*
- * Sentinels are supposed to be lonely so they flush the
- * current exection off the HW. Check that they are the
- * only request in the pending submission.
+ * Sentinels are supposed to be the last request so they flush
+ * the current execution off the HW. Check that they are the only
+ * request in the pending submission.
*/
if (sentinel) {
GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
@@ -1647,15 +1654,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
port - execlists->pending);
return false;
}
-
sentinel = i915_request_has_sentinel(rq);
- if (sentinel && port != execlists->pending) {
- GEM_TRACE_ERR("%s: sentinel context:%llx not in prime position[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
@@ -1967,7 +1966,7 @@ static int
switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
{
if (list_is_last(&rq->sched.link, &engine->active.requests))
- return INT_MIN;
+ return engine->execlists.queue_priority_hint;
return rq_prio(list_next_entry(rq, sched.link));
}
@@ -2445,6 +2444,7 @@ done:
set_preempt_timeout(engine, *active);
execlists_submit_ports(engine);
} else {
+ start_timeslice(engine, execlists->queue_priority_hint);
skip_submit:
ring_set_paused(engine, 0);
}
@@ -2569,6 +2569,25 @@ static void process_csb(struct intel_engine_cs *engine)
return;
/*
+ * We will consume all events from HW, or at least pretend to.
+ *
+ * The sequence of events from the HW is deterministic, and derived
+ * from our writes to the ELSP, with a smidgen of variability for
+ * the arrival of the asynchronous requests wrt the inflight
+ * execution. If the HW sends an event that does not correspond with
+ * the one we are expecting, we have to abandon all hope as we lose
+ * all tracking of what the engine is actually executing. We will
+ * only detect we are out of sequence with the HW when we get an
+ * 'impossible' event because we have already drained our own
+ * preemption/promotion queue. If this occurs, we know that we likely
+ * lost track of execution earlier and must unwind and restart; the
+ * simplest way is to stop processing the event queue and force the
+ * engine to reset.
+ */
+ execlists->csb_head = tail;
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+ /*
* Hopefully paired with a wmb() in HW!
*
* We must complete the read of the write pointer before any reads
@@ -2577,8 +2596,6 @@ static void process_csb(struct intel_engine_cs *engine)
* we perform the READ_ONCE(*csb_write).
*/
rmb();
-
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
do {
bool promote;
@@ -2613,6 +2630,11 @@ static void process_csb(struct intel_engine_cs *engine)
if (promote) {
struct i915_request * const *old = execlists->active;
+ if (GEM_WARN_ON(!*execlists->pending)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
ring_set_paused(engine, 0);
/* Point active to the new ELSP; prevent overwriting */
@@ -2635,7 +2657,10 @@ static void process_csb(struct intel_engine_cs *engine)
WRITE_ONCE(execlists->pending[0], NULL);
} else {
- GEM_BUG_ON(!*execlists->active);
+ if (GEM_WARN_ON(!*execlists->active)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
/* port0 completed, advanced to port1 */
trace_ports(execlists, "completed", execlists->active);
@@ -2686,7 +2711,6 @@ static void process_csb(struct intel_engine_cs *engine)
}
} while (head != tail);
- execlists->csb_head = head;
set_timeslice(engine);
/*
@@ -3005,12 +3029,12 @@ static u32 active_ccid(struct intel_engine_cs *engine)
return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
}
-static bool execlists_capture(struct intel_engine_cs *engine)
+static void execlists_capture(struct intel_engine_cs *engine)
{
struct execlists_capture *cap;
if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
- return true;
+ return;
/*
* We need to _quickly_ capture the engine state before we reset.
@@ -3019,7 +3043,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
*/
cap = capture_regs(engine);
if (!cap)
- return true;
+ return;
spin_lock_irq(&engine->active.lock);
cap->rq = active_context(engine, active_ccid(engine));
@@ -3056,14 +3080,13 @@ static bool execlists_capture(struct intel_engine_cs *engine)
INIT_WORK(&cap->work, execlists_capture_work);
schedule_work(&cap->work);
- return true;
+ return;
err_rq:
i915_request_put(cap->rq);
err_free:
i915_gpu_coredump_put(cap->error);
kfree(cap);
- return false;
}
static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
@@ -3083,10 +3106,8 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
tasklet_disable_nosync(&engine->execlists.tasklet);
ring_set_paused(engine, 1); /* Freeze the current request in place */
- if (execlists_capture(engine))
- intel_engine_reset(engine, msg);
- else
- ring_set_paused(engine, 0);
+ execlists_capture(engine);
+ intel_engine_reset(engine, msg);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
@@ -3117,9 +3138,18 @@ static void execlists_submission_tasklet(unsigned long data)
process_csb(engine);
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ const char *msg;
+
+ /* Generate the error message in priority wrt the user! */
+ if (engine->execlists.error_interrupt & GENMASK(15, 0))
+ msg = "CS error"; /* thrown by a user payload */
+ else if (engine->execlists.error_interrupt & ERROR_CSB)
+ msg = "invalid CSB event";
+ else
+ msg = "internal error";
+
engine->execlists.error_interrupt = 0;
- if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */
- execlists_reset(engine, "CS error");
+ execlists_reset(engine, msg);
}
if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
@@ -3170,13 +3200,6 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
if (reset_in_progress(execlists))
return; /* defer until we restart the engine following reset */
- /* Hopefully we clear execlists->pending[] to let us through */
- if (READ_ONCE(execlists->pending[0]) &&
- tasklet_trylock(&execlists->tasklet)) {
- process_csb(engine);
- tasklet_unlock(&execlists->tasklet);
- }
-
__execlists_submission_tasklet(engine);
}
@@ -3199,11 +3222,25 @@ static bool ancestor_on_hold(const struct intel_engine_cs *engine,
return !list_empty(&engine->active.hold) && hold_request(rq);
}
+static void flush_csb(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+
+ if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) {
+ if (!reset_in_progress(el))
+ process_csb(engine);
+ tasklet_unlock(&el->tasklet);
+ }
+}
+
static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
+ /* Hopefully we clear execlists->pending[] to let us through */
+ flush_csb(engine);
+
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);
@@ -3415,7 +3452,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
/* RPCS */
if (engine->class == RENDER_CLASS) {
regs[CTX_R_PWR_CLK_STATE] =
- intel_sseu_make_rpcs(engine->i915, &ce->sseu);
+ intel_sseu_make_rpcs(engine->gt, &ce->sseu);
i915_oa_init_reg_state(ce, engine);
}
@@ -3536,7 +3573,7 @@ static int emit_pdps(struct i915_request *rq)
int err, i;
u32 *cs;
- GEM_BUG_ON(intel_vgpu_active(rq->i915));
+ GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
/*
* Beware ye of the dragons, this sequence is magic!
@@ -3873,7 +3910,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
&wa_ctx->per_ctx };
wa_bb_func_t wa_bb_fn[2];
- struct page *page;
void *batch, *batch_ptr;
unsigned int i;
int ret;
@@ -3909,14 +3945,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
- page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
- batch = batch_ptr = kmap_atomic(page);
+ batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
/*
* Emit the two workaround batch buffers, recording the offset from the
* start of the workaround batch buffer object for each and their
* respective sizes.
*/
+ batch_ptr = batch;
for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
wa_bb[i]->offset = batch_ptr - batch;
if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
@@ -3928,10 +3964,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
+ GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
- BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
-
- kunmap_atomic(batch);
+ __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
+ __i915_gem_object_release_map(wa_ctx->vma->obj);
if (ret)
lrc_destroy_wa_ctx(engine);
@@ -4515,11 +4551,11 @@ static int gen8_emit_flush_render(struct i915_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN(request->i915, 9))
+ if (IS_GEN(request->engine->i915, 9))
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+ if (IS_KBL_REVID(request->engine->i915, 0, KBL_REVID_B0))
dc_flush_wa = true;
}
@@ -5587,7 +5623,7 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
list_move_tail(&rq->sched.link, virtual_queue(ve));
- tasklet_schedule(&ve->base.execlists.tasklet);
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
}
spin_unlock_irqrestore(&ve->base.active.lock, flags);
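The CSB handling above records the consumed tail up front and, on an 'impossible' event, stops processing and forces an engine reset rather than trying to resynchronise. A loose user-space analogy of that abandon-on-mismatch policy; the event encoding and names are invented:

#include <stdbool.h>
#include <stdio.h>

#define RING 8

static bool process_events(const int *csb, int head, int tail,
			   int *expected, unsigned int *error)
{
	while (head != tail) {
		int event = csb[head];

		head = (head + 1) % RING;
		if (event != *expected) {
			*error |= 1;	/* analogous to ERROR_CSB */
			return false;	/* stop and let a reset recover */
		}
		(*expected)++;
	}
	return true;
}

int main(void)
{
	int csb[RING] = { 0, 1, 2, 7 /* impossible */, 0, 0, 0, 0 };
	int expected = 0;
	unsigned int error = 0;
	bool ok = process_events(csb, 0, 4, &expected, &error);

	printf("ok=%d error=%#x next expected=%d\n", ok, error, expected);
	return 0;
}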
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index f86f7e68ce5e..f0862e924d11 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -155,16 +155,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
return ppgtt;
}
-static int ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+int ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
u32 pte_flags;
int err;
- if (flags & I915_VMA_ALLOC) {
- err = vma->vm->allocate_va_range(vma->vm,
- vma->node.start, vma->size);
+ if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
+ err = vm->allocate_va_range(vm, vma->node.start, vma->size);
if (err)
return err;
@@ -176,17 +176,16 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
- GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ vm->insert_entries(vm, vma, cache_level, pte_flags);
wmb();
return 0;
}
-static void ppgtt_unbind_vma(struct i915_vma *vma)
+void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ vm->clear_range(vm, vma->node.start, vma->size);
}
int ppgtt_set_pages(struct i915_vma *vma)
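ppgtt_bind_vma() above allocates the va range only on the vma's first bind (tracked by I915_VMA_ALLOC_BIT), and ppgtt_unbind_vma() clears the range only if that allocation ever happened. A user-space sketch of the allocate-once / test-and-clear pattern, using C11 atomics in place of the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

#define ALLOC_BIT 0x1u

static void bind_range(atomic_uint *flags)
{
	if (!(atomic_load(flags) & ALLOC_BIT)) {
		printf("allocate va range\n");	/* first bind only */
		atomic_fetch_or(flags, ALLOC_BIT);
	}
	printf("insert entries\n");
}

static void unbind_range(atomic_uint *flags)
{
	/* Test-and-clear: only clear the range if we ever allocated it. */
	if (atomic_fetch_and(flags, ~ALLOC_BIT) & ALLOC_BIT)
		printf("clear range\n");
}

int main(void)
{
	atomic_uint flags = 0;

	bind_range(&flags);	/* allocates, then inserts */
	bind_range(&flags);	/* inserts only */
	unbind_range(&flags);	/* clears */
	unbind_range(&flags);	/* no-op */
	return 0;
}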
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index f59e7875cc5e..1bfad589c63b 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -61,7 +61,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
#define OUT_BATCH(batch, i, val) \
do { \
if ((i) >= PAGE_SIZE / sizeof(u32)) \
- goto err; \
+ goto out; \
(batch)[(i)++] = (val); \
} while(0)
@@ -70,15 +70,12 @@ static int render_state_setup(struct intel_renderstate *so,
{
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
- unsigned int needs_clflush;
+ int ret = -EINVAL;
u32 *d;
- int ret;
- ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush);
- if (ret)
- return ret;
-
- d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0));
+ d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -89,7 +86,7 @@ static int render_state_setup(struct intel_renderstate *so,
if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
- goto err;
+ goto out;
d[i++] = s;
s = upper_32_bits(r);
@@ -103,7 +100,7 @@ static int render_state_setup(struct intel_renderstate *so,
if (rodata->reloc[reloc_index] != -1) {
drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
- goto err;
+ goto out;
}
so->batch_offset = i915_ggtt_offset(so->vma);
@@ -150,19 +147,11 @@ static int render_state_setup(struct intel_renderstate *so,
*/
so->aux_size = ALIGN(so->aux_size, 8);
- if (needs_clflush)
- drm_clflush_virt_range(d, i * sizeof(u32));
- kunmap_atomic(d);
-
ret = 0;
out:
- i915_gem_object_finish_access(so->vma->obj);
+ __i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
+ __i915_gem_object_release_map(so->vma->obj);
return ret;
-
-err:
- kunmap_atomic(d);
- ret = -EINVAL;
- goto out;
}
#undef OUT_BATCH
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 39070b514e65..46a5ceffc22f 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -342,7 +342,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
struct intel_uncore *uncore = engine->uncore;
- u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
+ u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
i915_reg_t sfc_usage;
@@ -417,7 +417,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
- u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
+ u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
i915_reg_t sfc_forced_lock;
u32 sfc_forced_lock_bit;
@@ -638,7 +638,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
bool intel_has_gpu_reset(const struct intel_gt *gt)
{
- if (!i915_modparams.reset)
+ if (!gt->i915->params.reset)
return NULL;
return intel_get_gpu_reset(gt);
@@ -646,7 +646,7 @@ bool intel_has_gpu_reset(const struct intel_gt *gt)
bool intel_has_reset_engine(const struct intel_gt *gt)
{
- if (i915_modparams.reset < 2)
+ if (gt->i915->params.reset < 2)
return false;
return INTEL_INFO(gt->i915)->has_reset_engine;
@@ -880,7 +880,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
return true;
/* Never fully initialised, recovery impossible */
- if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
+ if (intel_gt_has_unrecoverable_error(gt))
return false;
GT_TRACE(gt, "start\n");
@@ -930,7 +930,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* Warn CI about the unrecoverable wedged condition.
* Time for a reboot.
*/
- add_taint_for_CI(TAINT_WARN);
+ add_taint_for_CI(gt->i915, TAINT_WARN);
return false;
}
@@ -1038,7 +1038,7 @@ void intel_gt_reset(struct intel_gt *gt,
awake = reset_prepare(gt);
if (!intel_has_gpu_reset(gt)) {
- if (i915_modparams.reset)
+ if (gt->i915->params.reset)
drm_err(&gt->i915->drm, "GPU reset not supported\n");
else
drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
@@ -1097,7 +1097,7 @@ taint:
* rather than continue on into oblivion. For everyone else,
* the system should still plod along, but they have been warned!
*/
- add_taint_for_CI(TAINT_WARN);
+ add_taint_for_CI(gt->i915, TAINT_WARN);
error:
__intel_gt_set_wedged(gt);
goto finish;
@@ -1246,7 +1246,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
*/
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
+ engine_mask &= gt->info.engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(gt->i915);
@@ -1342,7 +1342,7 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
if (!intel_gt_is_wedged(gt))
return 0;
- if (intel_gt_has_init_error(gt))
+ if (intel_gt_has_unrecoverable_error(gt))
return -EIO;
/* Reset still in progress? Maybe we will recover? */
@@ -1360,6 +1360,15 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
I915_WEDGED_ON_INIT);
intel_gt_set_wedged(gt);
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+
+ /* Wedged on init is non-recoverable */
+ add_taint_for_CI(gt->i915, TAINT_WARN);
+}
+
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
+{
+ intel_gt_set_wedged(gt);
+ set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}
void intel_gt_init_reset(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 8e8d5f761166..a0eec7c11c0c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -47,8 +47,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt);
/*
* There's no unset_wedged_on_init paired with this one.
* Once we're wedged on init, there's no going back.
+ * Same thing for unset_wedged_on_fini.
*/
void intel_gt_set_wedged_on_init(struct intel_gt *gt);
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt);
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
@@ -71,14 +73,6 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
(W)->gt; \
__intel_fini_wedge((W)))
-static inline bool __intel_reset_failed(const struct intel_reset *reset)
-{
- GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
- !test_bit(I915_WEDGED, &reset->flags) : false);
-
- return unlikely(test_bit(I915_WEDGED, &reset->flags));
-}
-
bool intel_has_gpu_reset(const struct intel_gt *gt);
bool intel_has_reset_engine(const struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index f43bc3a0fe4f..add6b86d9d03 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -34,12 +34,17 @@ struct intel_reset {
* longer use the GPU - similar to #I915_WEDGED bit. The difference is
* in the way we're handling "forced" unwedged (e.g. through debugfs),
* which is not allowed in case we failed to initialize.
+ *
+ * #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we
+ * use it to mark that the GPU is no longer available (and prevent
+ * users from using it).
*/
unsigned long flags;
#define I915_RESET_BACKOFF 0
#define I915_RESET_MODESET 1
#define I915_RESET_ENGINE 2
-#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2)
+#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 3)
+#define I915_WEDGED_ON_FINI (BITS_PER_LONG - 2)
#define I915_WEDGED (BITS_PER_LONG - 1)
struct mutex mutex; /* serialises wedging/unwedging */
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index ca7286e58409..94915f668715 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -27,21 +27,15 @@
*
*/
-#include <linux/log2.h>
-
-#include "gem/i915_gem_context.h"
-
+#include "gen2_engine_cs.h"
+#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_context.h"
#include "intel_gt.h"
-#include "intel_gt_irq.h"
-#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
-#include "intel_workarounds.h"
#include "shmem_utils.h"
/* Rough estimate of the typical request size, performing a flush,
@@ -49,436 +43,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-static int
-gen2_render_ring_flush(struct i915_request *rq, u32 mode)
-{
- unsigned int num_store_dw;
- u32 cmd, *cs;
-
- cmd = MI_FLUSH;
- num_store_dw = 0;
- if (mode & EMIT_INVALIDATE)
- cmd |= MI_READ_FLUSH;
- if (mode & EMIT_FLUSH)
- num_store_dw = 4;
-
- cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = cmd;
- while (num_store_dw--) {
- *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT);
- *cs++ = 0;
- }
- *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-gen4_render_ring_flush(struct i915_request *rq, u32 mode)
-{
- u32 cmd, *cs;
- int i;
-
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH;
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_EXE_FLUSH;
- if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
- cmd |= MI_INVALIDATE_ISP;
- }
-
- i = 2;
- if (mode & EMIT_INVALIDATE)
- i += 20;
-
- cs = intel_ring_begin(rq, i);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = cmd;
-
- /*
- * A random delay to let the CS invalidate take effect? Without this
- * delay, the GPU relocation path fails as the CS does not see
- * the updated contents. Just as important, if we apply the flushes
- * to the EMIT_FLUSH branch (i.e. immediately after the relocation
- * write and before the invalidate on the next batch), the relocations
- * still fail. This implies that is a delay following invalidation
- * that is required to reset the caches as opposed to a delay to
- * ensure the memory is written.
- */
- if (mode & EMIT_INVALIDATE) {
- *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT) |
- PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
- *cs++ = 0;
-
- for (i = 0; i < 12; i++)
- *cs++ = MI_FLUSH;
-
- *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT) |
- PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
- *cs++ = 0;
- }
-
- *cs++ = cmd;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-/*
- * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
- * implementing two workarounds on gen6. From section 1.4.7.1
- * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
- *
- * [DevSNB-C+{W/A}] Before any depth stall flush (including those
- * produced by non-pipelined state commands), software needs to first
- * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
- * 0.
- *
- * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
- * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
- *
- * And the workaround for these two requires this workaround first:
- *
- * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
- * BEFORE the pipe-control with a post-sync op and no write-cache
- * flushes.
- *
- * And this last workaround is tricky because of the requirements on
- * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
- * volume 2 part 1:
- *
- * "1 of the following must also be set:
- * - Render Target Cache Flush Enable ([12] of DW1)
- * - Depth Cache Flush Enable ([0] of DW1)
- * - Stall at Pixel Scoreboard ([1] of DW1)
- * - Depth Stall ([13] of DW1)
- * - Post-Sync Operation ([13] of DW1)
- * - Notify Enable ([8] of DW1)"
- *
- * The cache flushes require the workaround flush that triggered this
- * one, so we can't use it. Depth stall would trigger the same.
- * Post-sync nonzero is what triggered this second workaround, so we
- * can't use that one either. Notify enable is IRQs, which aren't
- * really our business. That leaves only stall at scoreboard.
- */
-static int
-gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
-{
- u32 scratch_addr =
- intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
- u32 *cs;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(5);
- *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
- *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0; /* low dword */
- *cs++ = 0; /* high dword */
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(5);
- *cs++ = PIPE_CONTROL_QW_WRITE;
- *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
- *cs++ = 0;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-gen6_render_ring_flush(struct i915_request *rq, u32 mode)
-{
- u32 scratch_addr =
- intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
- u32 *cs, flags = 0;
- int ret;
-
- /* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = gen6_emit_post_sync_nonzero_flush(rq);
- if (ret)
- return ret;
-
- /* Just flush everything. Experiments have shown that reducing the
- * number of bits based on the write domains has little performance
- * impact.
- */
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- /*
- * Ensure that any following seqno writes only happen
- * when the render cache is indeed flushed.
- */
- flags |= PIPE_CONTROL_CS_STALL;
- }
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- /*
- * TLB invalidate requires a post-sync write.
- */
- flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
- }
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = flags;
- *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
- *cs++ = 0;
- *cs++ = 0;
-
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT) |
- PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = 0;
-
- /* Finally we can flush and with it emit the breadcrumb */
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
- PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = rq->fence.seqno;
-
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-
-static int
-gen7_render_ring_cs_stall_wa(struct i915_request *rq)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
- *cs++ = 0;
- *cs++ = 0;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-gen7_render_ring_flush(struct i915_request *rq, u32 mode)
-{
- u32 scratch_addr =
- intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
- u32 *cs, flags = 0;
-
- /*
- * Ensure that any following seqno writes only happen when the render
- * cache is indeed flushed.
- *
- * Workaround: 4th PIPE_CONTROL command (except the ones with only
- * read-cache invalidate bits set) must have the CS_STALL bit set. We
- * don't try to be clever and just set it unconditionally.
- */
- flags |= PIPE_CONTROL_CS_STALL;
-
- /*
- * CS_STALL suggests at least a post-sync write.
- */
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-
- /* Just flush everything. Experiments have shown that reducing the
- * number of bits based on the write domains has little performance
- * impact.
- */
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- }
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
-
- /* Workaround: we must issue a pipe_control with CS-stall bit
- * set before a pipe_control command that has the state cache
- * invalidate bit set. */
- gen7_render_ring_cs_stall_wa(rq);
- }
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = flags;
- *cs++ = scratch_addr;
- *cs++ = 0;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL);
- *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
- *cs++ = rq->fence.seqno;
-
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-
-static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->fence.seqno;
-
- *cs++ = MI_USER_INTERRUPT;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-
-#define GEN7_XCS_WA 32
-static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- int i;
-
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
- MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->fence.seqno;
-
- for (i = 0; i < GEN7_XCS_WA; i++) {
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
- }
-
- *cs++ = MI_FLUSH_DW;
- *cs++ = 0;
- *cs++ = 0;
-
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-#undef GEN7_XCS_WA
-
static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
/*
@@ -865,32 +429,6 @@ static void reset_finish(struct intel_engine_cs *engine)
{
}
-static int rcs_resume(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
-
- /*
- * Disable CONSTANT_BUFFER before it is loaded from the context
- * image. For as it is loaded, it is executed and the stored
- * address may no longer be valid, leading to a GPU hang.
- *
- * This imposes the requirement that userspace reload their
- * CONSTANT_BUFFER on every batch, fortunately a requirement
- * they are already accustomed to from before contexts were
- * enabled.
- */
- if (IS_GEN(i915, 4))
- intel_uncore_write(uncore, ECOSKPD,
- _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
-
- if (IS_GEN_RANGE(i915, 6, 7))
- intel_uncore_write(uncore, INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
-
- return xcs_resume(engine);
-}
-
static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
@@ -918,255 +456,6 @@ static void i9xx_submit_request(struct i915_request *request)
intel_ring_set_tail(request->ring, request->tail));
}
-static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH;
-
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
-
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-
-#define GEN5_WA_STORES 8 /* must be at least 1! */
-static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- int i;
-
- GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
- *cs++ = MI_FLUSH;
-
- BUILD_BUG_ON(GEN5_WA_STORES < 1);
- for (i = 0; i < GEN5_WA_STORES; i++) {
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
- }
-
- *cs++ = MI_USER_INTERRUPT;
-
- rq->tail = intel_ring_offset(rq, cs);
- assert_ring_tail_valid(rq->ring, rq->tail);
-
- return cs;
-}
-#undef GEN5_WA_STORES
-
-static void
-gen5_irq_enable(struct intel_engine_cs *engine)
-{
- gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-gen5_irq_disable(struct intel_engine_cs *engine)
-{
- gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-i9xx_irq_enable(struct intel_engine_cs *engine)
-{
- engine->i915->irq_mask &= ~engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
- intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
-}
-
-static void
-i9xx_irq_disable(struct intel_engine_cs *engine)
-{
- engine->i915->irq_mask |= engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
-}
-
-static void
-i8xx_irq_enable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
-
- i915->irq_mask &= ~engine->irq_enable_mask;
- intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
- ENGINE_POSTING_READ16(engine, RING_IMR);
-}
-
-static void
-i8xx_irq_disable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
-
- i915->irq_mask |= engine->irq_enable_mask;
- intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
-}
-
-static int
-bsd_ring_flush(struct i915_request *rq, u32 mode)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_FLUSH;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
- return 0;
-}
-
-static void
-gen6_irq_enable(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
-
- /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- ENGINE_POSTING_READ(engine, RING_IMR);
-
- gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-gen6_irq_disable(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
- gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-hsw_vebox_irq_enable(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
-
- /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- ENGINE_POSTING_READ(engine, RING_IMR);
-
- gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static void
-hsw_vebox_irq_disable(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~0);
- gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
-}
-
-static int
-i965_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 length,
- unsigned int dispatch_flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
- I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
- *cs++ = offset;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
-#define I830_BATCH_LIMIT SZ_256K
-#define I830_TLB_ENTRIES (2)
-#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
-static int
-i830_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- u32 *cs, cs_offset =
- intel_gt_scratch_offset(rq->engine->gt,
- INTEL_GT_SCRATCH_FIELD_DEFAULT);
-
- GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Evict the invalid PTE TLBs */
- *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
- *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
- *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
- *cs++ = cs_offset;
- *cs++ = 0xdeadbeef;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
- if (len > I830_BATCH_LIMIT)
- return -ENOSPC;
-
- cs = intel_ring_begin(rq, 6 + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Blit the batch (which has now all relocs applied) to the
- * stable batch scratch bo area (so that the CS never
- * stumbles over its tlb invalidation bug) ...
- */
- *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
- *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
- *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
- *cs++ = cs_offset;
- *cs++ = 4096;
- *cs++ = offset;
-
- *cs++ = MI_FLUSH;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- /* ... and execute it. */
- offset = cs_offset;
- }
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
- MI_BATCH_NON_SECURE);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-i915_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
- MI_BATCH_NON_SECURE);
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
static void __ring_context_fini(struct intel_context *ce)
{
i915_vma_put(ce->state);
@@ -1254,7 +543,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
vaddr, engine->context_size);
i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_map(obj);
}
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
@@ -1356,11 +645,11 @@ static inline int mi_set_context(struct i915_request *rq,
struct intel_context *ce,
u32 flags)
{
- struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
+ struct drm_i915_private *i915 = engine->i915;
enum intel_engine_id id;
const int num_engines =
- IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+ IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
@@ -1471,7 +760,7 @@ static inline int mi_set_context(struct i915_request *rq,
static int remap_l3_slice(struct i915_request *rq, int slice)
{
- u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
+ u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
@@ -1582,7 +871,7 @@ static int switch_context(struct i915_request *rq)
void **residuals = NULL;
int ret;
- GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
+ GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
if (engine->wa_ctx.vma->private != ce) {
@@ -1704,99 +993,6 @@ static void gen6_bsd_submit_request(struct i915_request *request)
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
-static int mi_flush_dw(struct i915_request *rq, u32 flags)
-{
- u32 cmd, *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cmd = MI_FLUSH_DW;
-
- /*
- * We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- /*
- * Bspec vol 1c.3 - blitter engine command streamer:
- * "If ENABLED, all TLBs will be invalidated once the flush
- * operation is complete. This bit is only valid when the
- * Post-Sync Operation field is a value of 1h or 3h."
- */
- cmd |= flags;
-
- *cs++ = cmd;
- *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = 0;
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
-{
- return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
-}
-
-static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
-{
- return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
-}
-
-static int
-hsw_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
- /* bit0-7 is the length on GEN6+ */
- *cs++ = offset;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int
-gen6_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE_I965);
- /* bit0-7 is the length on GEN6+ */
- *cs++ = offset;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-/* Blitter support (SandyBridge+) */
-
-static int gen6_ring_flush(struct i915_request *rq, u32 mode)
-{
- return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
-}
-
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
@@ -1843,11 +1039,11 @@ static void setup_irq(struct intel_engine_cs *engine)
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
} else if (INTEL_GEN(i915) >= 3) {
- engine->irq_enable = i9xx_irq_enable;
- engine->irq_disable = i9xx_irq_disable;
+ engine->irq_enable = gen3_irq_enable;
+ engine->irq_disable = gen3_irq_disable;
} else {
- engine->irq_enable = i8xx_irq_enable;
- engine->irq_disable = i8xx_irq_disable;
+ engine->irq_enable = gen2_irq_enable;
+ engine->irq_disable = gen2_irq_disable;
}
}
@@ -1874,7 +1070,7 @@ static void setup_common(struct intel_engine_cs *engine)
* equivalent to our next initial breadcrumb so we can elide
* engine->emit_init_breadcrumb().
*/
- engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
if (IS_GEN(i915, 5))
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
@@ -1883,11 +1079,11 @@ static void setup_common(struct intel_engine_cs *engine)
if (INTEL_GEN(i915) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(i915) >= 4)
- engine->emit_bb_start = i965_emit_bb_start;
+ engine->emit_bb_start = gen4_emit_bb_start;
else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
else
- engine->emit_bb_start = i915_emit_bb_start;
+ engine->emit_bb_start = gen3_emit_bb_start;
}
static void setup_rcs(struct intel_engine_cs *engine)
@@ -1900,25 +1096,23 @@ static void setup_rcs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (INTEL_GEN(i915) >= 7) {
- engine->emit_flush = gen7_render_ring_flush;
- engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
+ engine->emit_flush = gen7_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
} else if (IS_GEN(i915, 6)) {
- engine->emit_flush = gen6_render_ring_flush;
- engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
+ engine->emit_flush = gen6_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
} else if (IS_GEN(i915, 5)) {
- engine->emit_flush = gen4_render_ring_flush;
+ engine->emit_flush = gen4_emit_flush_rcs;
} else {
if (INTEL_GEN(i915) < 4)
- engine->emit_flush = gen2_render_ring_flush;
+ engine->emit_flush = gen2_emit_flush;
else
- engine->emit_flush = gen4_render_ring_flush;
+ engine->emit_flush = gen4_emit_flush_rcs;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
-
- engine->resume = rcs_resume;
}
static void setup_vcs(struct intel_engine_cs *engine)
@@ -1929,15 +1123,15 @@ static void setup_vcs(struct intel_engine_cs *engine)
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN(i915, 6))
engine->set_default_submission = gen6_bsd_set_default_submission;
- engine->emit_flush = gen6_bsd_ring_flush;
+ engine->emit_flush = gen6_emit_flush_vcs;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
if (IS_GEN(i915, 6))
- engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
- engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
} else {
- engine->emit_flush = bsd_ring_flush;
+ engine->emit_flush = gen4_emit_flush_vcs;
if (IS_GEN(i915, 5))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
@@ -1949,13 +1143,13 @@ static void setup_bcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- engine->emit_flush = gen6_ring_flush;
+ engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
if (IS_GEN(i915, 6))
- engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
- engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static void setup_vecs(struct intel_engine_cs *engine)
@@ -1964,12 +1158,12 @@ static void setup_vecs(struct intel_engine_cs *engine)
GEM_BUG_ON(INTEL_GEN(i915) < 7);
- engine->emit_flush = gen6_ring_flush;
+ engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- engine->irq_enable = hsw_vebox_irq_enable;
- engine->irq_disable = hsw_vebox_irq_disable;
+ engine->irq_enable = hsw_irq_enable_vecs;
+ engine->irq_disable = hsw_irq_disable_vecs;
- engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 2f59fc6df3c2..97ba14ad52e4 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -51,15 +51,16 @@ static void rps_timer(struct timer_list *t)
{
struct intel_rps *rps = from_timer(rps, t, timer);
struct intel_engine_cs *engine;
+ ktime_t dt, last, timestamp;
enum intel_engine_id id;
s64 max_busy[3] = {};
- ktime_t dt, last;
+ timestamp = 0;
for_each_engine(engine, rps_to_gt(rps), id) {
s64 busy;
int i;
- dt = intel_engine_get_busy_time(engine);
+ dt = intel_engine_get_busy_time(engine, &timestamp);
last = engine->stats.rps;
engine->stats.rps = dt;
@@ -69,16 +70,14 @@ static void rps_timer(struct timer_list *t)
swap(busy, max_busy[i]);
}
}
-
- dt = ktime_get();
last = rps->pm_timestamp;
- rps->pm_timestamp = dt;
+ rps->pm_timestamp = timestamp;
if (intel_rps_is_active(rps)) {
s64 busy;
int i;
- dt = ktime_sub(dt, last);
+ dt = ktime_sub(timestamp, last);
/*
* Our goal is to evaluate each engine independently, so we run
@@ -1063,11 +1062,12 @@ static bool gen6_rps_enable(struct intel_rps *rps)
static int chv_rps_max_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_gt *gt = rps_to_gt(rps);
u32 val;
val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (RUNTIME_INFO(i915)->sseu.eu_total) {
+ switch (gt->info.sseu.eu_total) {
case 8:
/* (2 * 4) config */
val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index d173271c7397..f1c039e1b5ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -60,10 +60,552 @@ intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
return hweight32(intel_sseu_get_subslices(sseu, slice));
}
-u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
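+/*
+ * The EU mask is stored as a flat byte array: eu_stride bytes per
+ * subslice and max_subslices * eu_stride bytes per slice. The helpers
+ * below pack and unpack a per-subslice u16 mask one byte at a time.
+ */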
+static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int slice_stride = sseu->max_subslices * sseu->eu_stride;
+
+ return slice * slice_stride + subslice * sseu->eu_stride;
+}
+
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+ u16 eu_mask = 0;
+
+ for (i = 0; i < sseu->eu_stride; i++)
+ eu_mask |=
+ ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE);
+
+ return eu_mask;
+}
+
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+ u16 eu_mask)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+ for (i = 0; i < sseu->eu_stride; i++)
+ sseu->eu_mask[offset + i] =
+ (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+}
+
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+{
+ u16 i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
+ total += hweight8(sseu->eu_mask[i]);
+
+ return total;
+}
+
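+/*
+ * Expand the fused slice/subslice/EU enable masks into the sseu
+ * description: every enabled slice gets the same subslice mask, and
+ * every enabled subslice within it the same EU mask.
+ */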
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u8 s_en, u32 ss_en, u16 eu_en)
+{
+ int s, ss;
+
+ /* ss_en represents entire subslice mask across all slices */
+ GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
+ sizeof(ss_en) * BITS_PER_BYTE);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ if ((s_en & BIT(s)) == 0)
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ intel_sseu_set_subslices(sseu, s, ss_en);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, s, ss))
+ sseu_set_eus(sseu, s, ss, eu_en);
+ }
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void gen12_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 dss_en;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ u8 s_en;
+ int eu;
+
+ /*
+ * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+ * Instead of splitting these, provide userspace with an array
+ * of DSS to more closely represent the hardware resource.
+ */
+ intel_sseu_set_info(sseu, 1, 6, 16);
+
+ s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+ GEN11_GT_S_ENA_MASK;
+
+ dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);
+
+ /* one bit per pair of EUs */
+ eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+ GEN11_EU_DIS_MASK);
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+
+ /* TGL only supports slice-level power gating */
+ sseu->has_slice_pg = 1;
+}
+
+static void gen11_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 ss_en;
+ u8 eu_en;
+ u8 s_en;
+
+ if (IS_ELKHARTLAKE(gt->i915))
+ intel_sseu_set_info(sseu, 1, 4, 8);
+ else
+ intel_sseu_set_info(sseu, 1, 8, 8);
+
+ s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+ GEN11_GT_S_ENA_MASK;
+ ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
+
+ eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+ GEN11_EU_DIS_MASK);
+
+ gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
+
+ /* ICL has no power gating restrictions. */
+ sseu->has_slice_pg = 1;
+ sseu->has_subslice_pg = 1;
+ sseu->has_eu_pg = 1;
+}
+
+static void gen10_sseu_info_init(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+ const int eu_mask = 0xff;
+ u32 subslice_mask, eu_en;
+ int s, ss;
+
+ intel_sseu_set_info(sseu, 6, 4, 8);
+
+ sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
+ GEN10_F2_S_ENA_SHIFT;
+
+ /* Slice0 */
+ eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
+ /* Slice1 */
+ sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+ sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
+ /* Slice2 */
+ sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
+ sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
+ /* Slice3 */
+ sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+ sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
+ /* Slice4 */
+ sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
+ sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
+ /* Slice5 */
+ sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3);
+ sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
+
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ u32 subslice_mask_with_eus = subslice_mask;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ if (sseu_get_eus(sseu, s, ss) == 0)
+ subslice_mask_with_eus &= ~BIT(ss);
+ }
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ intel_sseu_set_subslices(sseu, s, s == 0 ?
+ subslice_mask_with_eus :
+ subslice_mask_with_eus & 0x3);
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * CNL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery.
+ */
+ sseu->eu_per_subslice =
+ intel_sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+ 0;
+
+ /* No restrictions on Power Gating */
+ sseu->has_slice_pg = 1;
+ sseu->has_subslice_pg = 1;
+ sseu->has_eu_pg = 1;
+}
+
+static void cherryview_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ u32 fuse;
+ u8 subslice_mask = 0;
+
+ fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);
+
+ sseu->slice_mask = BIT(0);
+ intel_sseu_set_info(sseu, 1, 2, 8);
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+
+ subslice_mask |= BIT(0);
+ sseu_set_eus(sseu, 0, 0, ~disabled_mask);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+
+ subslice_mask |= BIT(1);
+ sseu_set_eus(sseu, 0, 1, ~disabled_mask);
+ }
+
+ intel_sseu_set_subslices(sseu, 0, subslice_mask);
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * CHV expected to always have a uniform distribution of EU
+ * across subslices.
+ */
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+ sseu->eu_total /
+ intel_sseu_subslice_total(sseu) :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
+ sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_device_info *info = mkwrite_device_info(i915);
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 fuse2, eu_disable, subslice_mask;
+ const u8 eu_mask = 0xff;
+ int s, ss;
+
+ fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+ sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+
+ /* BXT has a single slice and at most 3 subslices. */
+ intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
+ IS_GEN9_LP(i915) ? 3 : 4, 8);
+
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ subslice_mask = (1 << sseu->max_subslices) - 1;
+ subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT);
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < sseu->max_slices; s++) {
+ if (!(sseu->slice_mask & BIT(s)))
+ /* skip disabled slice */
+ continue;
+
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+ eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ int eu_per_ss;
+ u8 eu_disabled_mask;
+
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ /* skip disabled subslice */
+ continue;
+
+ eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+ eu_per_ss = sseu->max_eus_per_subslice -
+ hweight8(eu_disabled_mask);
+
+ /*
+ * Record which subslice(s) has(have) 7 EUs. We
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_per_ss == 7)
+ sseu->subslice_7eu[s] |= BIT(ss);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery. BXT is expected to be perfectly uniform in EU
+ * distribution.
+ */
+ sseu->eu_per_subslice =
+ intel_sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+ 0;
+
+ /*
+ * SKL+ supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice. BXT+ supports subslice
+ * power gating on devices with more than one subslice, and
+ * supports EU power gating on devices with more than one EU
+ * pair per subslice.
+ */
+ sseu->has_slice_pg =
+ !IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1;
+ sseu->has_subslice_pg =
+ IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1;
+ sseu->has_eu_pg = sseu->eu_per_subslice > 2;
+
+ if (IS_GEN9_LP(i915)) {
+#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss)))
+ info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
+
+ sseu->min_eu_in_pool = 0;
+ if (info->has_pooled_eu) {
+ if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+ sseu->min_eu_in_pool = 3;
+ else if (IS_SS_DISABLED(1))
+ sseu->min_eu_in_pool = 6;
+ else
+ sseu->min_eu_in_pool = 9;
+ }
+#undef IS_SS_DISABLED
+ }
+}
+
+static void bdw_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ int s, ss;
+ u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
+ u32 eu_disable0, eu_disable1, eu_disable2;
+
+ fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+ sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ intel_sseu_set_info(sseu, 3, 3, 8);
+
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
+ subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+ GEN8_F2_SS_DIS_SHIFT);
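+ /*
+ * Each slice has a 24-bit EU disable mask (3 subslices x 8 EUs),
+ * which straddles the 32-bit GEN8_EU_DISABLE registers, so stitch
+ * each slice's mask together from adjacent registers.
+ */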
+ eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+ eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+ eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+ eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
+ eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
+ ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
+ (32 - GEN8_EU_DIS0_S1_SHIFT));
+ eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
+ ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
+ (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < sseu->max_slices; s++) {
+ if (!(sseu->slice_mask & BIT(s)))
+ /* skip disabled slice */
+ continue;
+
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u8 eu_disabled_mask;
+ u32 n_disabled;
+
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ /* skip disabled subslice */
+ continue;
+
+ eu_disabled_mask =
+ eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+ n_disabled = hweight8(eu_disabled_mask);
+
+ /*
+ * Record which subslices have 7 EUs.
+ */
+ if (sseu->max_eus_per_subslice - n_disabled == 7)
+ sseu->subslice_7eu[s] |= 1 << ss;
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * BDW is expected to always have a uniform distribution of EU across
+ * subslices with the exception that any one EU in any one subslice may
+ * be fused off for die recovery.
+ */
+ sseu->eu_per_subslice =
+ intel_sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+ 0;
+
+ /*
+ * BDW supports slice power gating on devices with more than
+ * one slice.
+ */
+ sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
+}
+
+static void hsw_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ u32 fuse1;
+ u8 subslice_mask = 0;
+ int s, ss;
+
+ /*
+ * There isn't a register to tell us how many slices/subslices. We
+ * work off the PCI-ids here.
+ */
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ MISSING_CASE(INTEL_INFO(i915)->gt);
+ fallthrough;
+ case 1:
+ sseu->slice_mask = BIT(0);
+ subslice_mask = BIT(0);
+ break;
+ case 2:
+ sseu->slice_mask = BIT(0);
+ subslice_mask = BIT(0) | BIT(1);
+ break;
+ case 3:
+ sseu->slice_mask = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
+ break;
+ }
+
+ fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+ switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+ default:
+ MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
+ HSW_F1_EU_DIS_SHIFT);
+ fallthrough;
+ case HSW_F1_EU_DIS_10EUS:
+ sseu->eu_per_subslice = 10;
+ break;
+ case HSW_F1_EU_DIS_8EUS:
+ sseu->eu_per_subslice = 8;
+ break;
+ case HSW_F1_EU_DIS_6EUS:
+ sseu->eu_per_subslice = 6;
+ break;
+ }
+
+ intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+ hweight8(subslice_mask),
+ sseu->eu_per_subslice);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ sseu_set_eus(sseu, s, ss,
+ (1UL << sseu->eu_per_subslice) - 1);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /* No powergating for you. */
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
+}
+
+void intel_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_HASWELL(i915))
+ hsw_sseu_info_init(gt);
+ else if (IS_CHERRYVIEW(i915))
+ cherryview_sseu_info_init(gt);
+ else if (IS_BROADWELL(i915))
+ bdw_sseu_info_init(gt);
+ else if (IS_GEN(i915, 9))
+ gen9_sseu_info_init(gt);
+ else if (IS_GEN(i915, 10))
+ gen10_sseu_info_init(gt);
+ else if (IS_GEN(i915, 11))
+ gen11_sseu_info_init(gt);
+ else if (INTEL_GEN(i915) >= 12)
+ gen12_sseu_info_init(gt);
+}
+
+u32 intel_sseu_make_rpcs(struct intel_gt *gt,
const struct intel_sseu *req_sseu)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ struct drm_i915_private *i915 = gt->i915;
+ const struct sseu_dev_info *sseu = &gt->info.sseu;
bool subslice_pg = sseu->has_subslice_pg;
u8 slices, subslices;
u32 rpcs = 0;
@@ -173,3 +715,48 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
return rpcs;
}
+
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+ int s;
+
+ drm_printf(p, "slice total: %u, mask=%04x\n",
+ hweight8(sseu->slice_mask), sseu->slice_mask);
+ drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
+ for (s = 0; s < sseu->max_slices; s++) {
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
+ s, intel_sseu_subslices_per_slice(sseu, s),
+ intel_sseu_get_subslices(sseu, s));
+ }
+ drm_printf(p, "EU total: %u\n", sseu->eu_total);
+ drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+ drm_printf(p, "has slice power gating: %s\n",
+ yesno(sseu->has_slice_pg));
+ drm_printf(p, "has subslice power gating: %s\n",
+ yesno(sseu->has_subslice_pg));
+ drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+}
+
+void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ int s, ss;
+
+ if (sseu->max_slices == 0) {
+ drm_printf(p, "Unavailable\n");
+ return;
+ }
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
+ s, intel_sseu_subslices_per_slice(sseu, s),
+ intel_sseu_get_subslices(sseu, s));
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u16 enabled_eus = sseu_get_eus(sseu, s, ss);
+
+ drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
+ ss, hweight16(enabled_eus), enabled_eus);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index d1d225204f09..23ba6c2ebe70 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -13,6 +13,8 @@
#include "i915_gem.h"
struct drm_i915_private;
+struct intel_gt;
+struct drm_printer;
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
@@ -94,7 +96,13 @@ u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
u32 ss_mask);
-u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+void intel_sseu_info_init(struct intel_gt *gt);
+
+u32 intel_sseu_make_rpcs(struct intel_gt *gt,
const struct intel_sseu *req_sseu);
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
+void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p);
+
#endif /* __INTEL_SSEU_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
new file mode 100644
index 000000000000..51780282d872
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "debugfs_gt.h"
+#include "intel_sseu_debugfs.h"
+#include "i915_drv.h"
+
+static void sseu_copy_subslices(const struct sseu_dev_info *sseu,
+ int slice, u8 *to_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
+}
+
+static void cherryview_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 2
+ struct intel_uncore *uncore = gt->uncore;
+ const int ss_max = SS_MAX;
+ u32 sig1[SS_MAX], sig2[SS_MAX];
+ int ss;
+
+ sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1);
+ sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1);
+ sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2);
+ sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2);
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (sig1[ss] & CHV_SS_PG_ENABLE)
+ /* skip disabled subslice */
+ continue;
+
+ sseu->slice_mask = BIT(0);
+ sseu->subslice_mask[0] |= BIT(ss);
+ eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+ ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice, eu_cnt);
+ }
+#undef SS_MAX
+}
+
+static void gen10_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 6
+ struct intel_uncore *uncore = gt->uncore;
+ const struct intel_gt_info *info = &gt->info;
+ u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
+ int s, ss;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ /*
+ * FIXME: Valid SS Mask respects the spec and reads
+ * only valid bits for those registers, excluding reserved,
+ * although this seems wrong because it would leave many
+ * subslices without an ACK.
+ */
+ s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) &
+ GEN10_PGCTL_VALID_SS_MASK(s);
+ eu_reg[2 * s] = intel_uncore_read(uncore,
+ GEN10_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2 * s + 1] = intel_uncore_read(uncore,
+ GEN10_SS23_EU_PGCTL_ACK(s));
+ }
+
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+ sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
+
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+ unsigned int eu_cnt;
+
+ if (info->sseu.has_subslice_pg &&
+ !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
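+ /* Each ack bit in eu_mask covers a pair of EUs, hence the factor of 2 */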
+ eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
+ eu_mask[ss % 2]);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
+ }
+ }
+#undef SS_MAX
+}
+
+static void gen9_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 3
+ struct intel_uncore *uncore = gt->uncore;
+ const struct intel_gt_info *info = &gt->info;
+ u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
+ int s, ss;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s));
+ eu_reg[2 * s] =
+ intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2 * s + 1] =
+ intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s));
+ }
+
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ if (IS_GEN9_BC(gt->i915))
+ sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
+
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+ unsigned int eu_cnt;
+ u8 ss_idx = s * info->sseu.ss_stride +
+ ss / BITS_PER_BYTE;
+
+ if (IS_GEN9_LP(gt->i915)) {
+ if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
+ sseu->subslice_mask[ss_idx] |=
+ BIT(ss % BITS_PER_BYTE);
+ }
+
+ eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
+ eu_cnt = 2 * hweight32(eu_cnt);
+
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
+ }
+ }
+#undef SS_MAX
+}
+
+static void bdw_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+ const struct intel_gt_info *info = &gt->info;
+ u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO);
+ int s;
+
+ sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
+
+ if (sseu->slice_mask) {
+ sseu->eu_per_subslice = info->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
+ sseu->eu_total = sseu->eu_per_subslice *
+ intel_sseu_subslice_total(sseu);
+
+ /* subtract fused off EU(s) from enabled slice(s) */
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ u8 subslice_7eu = info->sseu.subslice_7eu[s];
+
+ sseu->eu_total -= hweight8(subslice_7eu);
+ }
+ }
+}
+
+static void i915_print_sseu_info(struct seq_file *m,
+ bool is_available_info,
+ bool has_pooled_eu,
+ const struct sseu_dev_info *sseu)
+{
+ const char *type = is_available_info ? "Available" : "Enabled";
+ int s;
+
+ seq_printf(m, " %s Slice Mask: %04x\n", type,
+ sseu->slice_mask);
+ seq_printf(m, " %s Slice Total: %u\n", type,
+ hweight8(sseu->slice_mask));
+ seq_printf(m, " %s Subslice Total: %u\n", type,
+ intel_sseu_subslice_total(sseu));
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ seq_printf(m, " %s Slice%i subslices: %u\n", type,
+ s, intel_sseu_subslices_per_slice(sseu, s));
+ }
+ seq_printf(m, " %s EU Total: %u\n", type,
+ sseu->eu_total);
+ seq_printf(m, " %s EU Per Subslice: %u\n", type,
+ sseu->eu_per_subslice);
+
+ if (!is_available_info)
+ return;
+
+ seq_printf(m, " Has Pooled EU: %s\n", yesno(has_pooled_eu));
+ if (has_pooled_eu)
+ seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
+
+ seq_printf(m, " Has Slice Power Gating: %s\n",
+ yesno(sseu->has_slice_pg));
+ seq_printf(m, " Has Subslice Power Gating: %s\n",
+ yesno(sseu->has_subslice_pg));
+ seq_printf(m, " Has EU Power Gating: %s\n",
+ yesno(sseu->has_eu_pg));
+}
+
+/*
+ * this is called from top-level debugfs as well, so we can't get the gt from
+ * the seq_file.
+ */
+int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const struct intel_gt_info *info = &gt->info;
+ struct sseu_dev_info sseu;
+ intel_wakeref_t wakeref;
+
+ if (INTEL_GEN(i915) < 8)
+ return -ENODEV;
+
+ seq_puts(m, "SSEU Device Info\n");
+ i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);
+
+ seq_puts(m, "SSEU Device Status\n");
+ memset(&sseu, 0, sizeof(sseu));
+ intel_sseu_set_info(&sseu, info->sseu.max_slices,
+ info->sseu.max_subslices,
+ info->sseu.max_eus_per_subslice);
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ if (IS_CHERRYVIEW(i915))
+ cherryview_sseu_device_status(gt, &sseu);
+ else if (IS_BROADWELL(i915))
+ bdw_sseu_device_status(gt, &sseu);
+ else if (IS_GEN(i915, 9))
+ gen9_sseu_device_status(gt, &sseu);
+ else if (INTEL_GEN(i915) >= 10)
+ gen10_sseu_device_status(gt, &sseu);
+ }
+
+ i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu);
+
+ return 0;
+}
+
+static int sseu_status_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+
+ return intel_sseu_status(m, gt);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(sseu_status);
+
+static int rcs_topology_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_sseu_print_topology(&gt->info.sseu, &p);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
+
+void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "sseu_status", &sseu_status_fops, NULL },
+ { "rcs_topology", &rcs_topology_fops, NULL },
+ };
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h
new file mode 100644
index 000000000000..73f001589e90
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef INTEL_SSEU_DEBUGFS_H
+#define INTEL_SSEU_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+struct seq_file;
+
+int intel_sseu_status(struct seq_file *m, struct intel_gt *gt);
+void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* INTEL_SSEU_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 4546284fede1..46d20f5f3ddc 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -73,6 +73,8 @@ hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
return vma;
}
+ GT_TRACE(timeline->gt, "new HWSP allocated\n");
+
vma->private = hwsp;
hwsp->gt = timeline->gt;
hwsp->vma = vma;
@@ -327,6 +329,8 @@ int intel_timeline_pin(struct intel_timeline *tl)
tl->hwsp_offset =
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
+ GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ tl->fence_context, tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline);
if (atomic_fetch_inc(&tl->pin_count)) {
@@ -434,6 +438,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
int err;
might_lock(&tl->gt->ggtt->vm.mutex);
+ GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);
/*
* If there is an outstanding GPU reference to this cacheline,
@@ -497,6 +502,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
tl->hwsp_offset += i915_ggtt_offset(vma);
+ GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ tl->fence_context, tl->hwsp_offset);
cacheline_acquire(cl);
tl->hwsp_cacheline = cl;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 85d2bef51524..5726cd0a37e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -205,6 +205,18 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
#define WA_SET_FIELD_MASKED(addr, mask, value) \
wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
+static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+}
+
+static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+}
+
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
@@ -355,7 +367,10 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
- if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
+ if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@@ -389,7 +404,7 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = engine->i915;
+ struct intel_gt *gt = engine->gt;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -400,7 +415,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
continue;
/*
@@ -409,7 +424,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
@@ -600,11 +615,14 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
* Wa_1604555607:gen12 and Wa_1608008084:gen12
* FF_MODE2 register will return the wrong value when read. The default
* value for this register is zero for all fields and there are no bit
- * masks. So instead of doing a RMW we should just write the TDS timer
- * value for Wa_1604555607.
+ * masks. So instead of doing a RMW we should just write the GS Timer
+ * and TDS timer values for Wa_1604555607 and Wa_16011163337.
*/
- wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
- FF_MODE2_TDS_TIMER_128, 0);
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128,
+ 0);
/* WaDisableGPGPUMidThreadPreemption:tgl */
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
@@ -630,7 +648,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine, wal);
- else if (IS_COFFEELAKE(i915))
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
cfl_ctx_workarounds_init(engine, wal);
else if (IS_GEMINILAKE(i915))
glk_ctx_workarounds_init(engine, wal);
@@ -644,6 +662,10 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
bdw_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 7))
+ gen7_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 6))
+ gen6_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
return;
else
@@ -917,7 +939,7 @@ static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(i915))
+ if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
wa_write_or(wal,
GAM_ECOCHK,
ECOCHK_DIS_TLB);
@@ -1014,7 +1036,7 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
unsigned int slice, subslice;
u32 l3_en, mcr, mcr_mask;
@@ -1180,7 +1202,7 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
- else if (IS_COFFEELAKE(i915))
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
cfl_gt_workarounds_init(i915, wal);
else if (IS_GEMINILAKE(i915))
glk_gt_workarounds_init(i915, wal);
@@ -1428,6 +1450,18 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_RANGE_4);
}
+static void cml_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
+
+ cfl_whitelist_build(engine);
+}
+
static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@@ -1478,9 +1512,15 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
default:
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@@ -1512,6 +1552,9 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
whitelist_reg(w, HIZ_CHICKEN);
break;
default:
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@@ -1529,6 +1572,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(engine);
+ else if (IS_COMETLAKE(i915))
+ cml_whitelist_build(engine);
else if (IS_COFFEELAKE(i915))
cfl_whitelist_build(engine);
else if (IS_GEMINILAKE(i915))
@@ -1604,11 +1649,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
- /* Wa_1407928979:tgl */
- wa_write_or(wal,
- GEN7_FF_THREAD_MODE,
- GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
-
/* Wa_1408615072:tgl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
@@ -1632,6 +1672,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_14010229206:tgl
*/
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+ /*
+ * Wa_1407928979:tgl A*
+ * Wa_18011464164:tgl B0+
+ * Wa_22010931296:tgl B0+
+ */
+ wa_write_or(wal, GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
}
if (IS_GEN(i915, 11)) {
@@ -1725,6 +1773,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /* Wa_22010271021:ehl */
+ if (IS_ELKHARTLAKE(i915))
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN_RANGE(i915, 9, 12)) {
@@ -1734,7 +1788,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
}
- if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
wa_write_or(wal,
GEN8_GARBCNTL,
@@ -1818,6 +1875,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
+
+ if (IS_GEN(i915, 4))
+ /*
+ * Disable CONSTANT_BUFFER before it is loaded from the context
+ * image. Once it is loaded, it is executed and the stored
+ * address may no longer be valid, leading to a GPU hang.
+ *
+ * This imposes the requirement that userspace reload their
+ * CONSTANT_BUFFER on every batch, fortunately a requirement
+ * they are already accustomed to from before contexts were
+ * enabled.
+ */
+ wa_add(wal, ECOSKPD,
+ 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
+ 0 /* XXX bit doesn't stick on Broadwater */);
}
static void
@@ -1932,7 +2004,7 @@ wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
- struct drm_i915_private *i915 = rq->i915;
+ struct drm_i915_private *i915 = rq->engine->i915;
unsigned int i, count = 0;
const struct i915_wa *wa;
u32 srm, *cs;
@@ -2021,7 +2093,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
err = 0;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
+ if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
continue;
if (!wa_verify(wa, results[i], wal->name, from))
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index f88e445a1cae..729c3c7b11e2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -49,7 +49,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 697114dd1f47..73243ba59c7d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -10,6 +10,7 @@
#include "intel_gt_requests.h"
#include "i915_selftest.h"
+#include "selftest_engine_heartbeat.h"
static int timeline_sync(struct intel_timeline *tl)
{
@@ -142,24 +143,6 @@ out:
return err;
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
- unsigned long *saved)
-{
- *saved = engine->props.heartbeat_interval_ms;
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
- unsigned long saved)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms = saved;
-}
-
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
@@ -170,11 +153,9 @@ static int live_idle_flush(void *arg)
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
- unsigned long heartbeat;
-
- engine_heartbeat_disable(engine, &heartbeat);
+ st_engine_heartbeat_disable(engine);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
- engine_heartbeat_enable(engine, heartbeat);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
}
@@ -192,11 +173,9 @@ static int live_idle_pulse(void *arg)
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
- unsigned long heartbeat;
-
- engine_heartbeat_disable(engine, &heartbeat);
+ st_engine_heartbeat_disable(engine);
err = __live_idle_pulse(engine, intel_engine_pulse);
- engine_heartbeat_enable(engine, heartbeat);
+ st_engine_heartbeat_enable(engine);
if (err && err != -ENODEV)
break;
@@ -386,11 +365,27 @@ int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- saved_hangcheck = i915_modparams.enable_hangcheck;
- i915_modparams.enable_hangcheck = INT_MAX;
+ saved_hangcheck = i915->params.enable_hangcheck;
+ i915->params.enable_hangcheck = INT_MAX;
err = intel_gt_live_subtests(tests, &i915->gt);
- i915_modparams.enable_hangcheck = saved_hangcheck;
+ i915->params.enable_hangcheck = saved_hangcheck;
return err;
}
+
+void st_engine_heartbeat_disable(struct intel_engine_cs *engine)
+{
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+void st_engine_heartbeat_enable(struct intel_engine_cs *engine)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms =
+ engine->defaults.heartbeat_interval_ms;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h
new file mode 100644
index 000000000000..cd27113d5400
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_ENGINE_HEARTBEAT_H
+#define SELFTEST_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+
+void st_engine_heartbeat_disable(struct intel_engine_cs *engine);
+void st_engine_heartbeat_enable(struct intel_engine_cs *engine);
+
+#endif /* SELFTEST_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index cbf6b0735272..b08fc5390e8a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -6,7 +6,107 @@
#include "i915_selftest.h"
#include "selftest_engine.h"
+#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+
+static int live_engine_busy_stats(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Check that if an engine supports busy-stats, they tell the truth.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ ktime_t de, dt;
+ ktime_t t[2];
+
+ if (!intel_engine_supports_stats(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ err = -EBUSY;
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+
+ ENGINE_TRACE(engine, "measuring idle time\n");
+ preempt_disable();
+ de = intel_engine_get_busy_time(engine, &t[0]);
+ udelay(100);
+ de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
+ preempt_enable();
+ dt = ktime_sub(t[1], t[0]);
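+ /* With no requests running, expect (almost) no busy time over the 100us sample */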
+ if (de < 0 || de > 10) {
+ pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
+ engine->name,
+ de, (int)div64_u64(100 * de, dt), dt);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ goto end;
+ }
+
+ /* 100% busy */
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ err = -ETIME;
+ goto end;
+ }
+
+ ENGINE_TRACE(engine, "measuring busy time\n");
+ preempt_disable();
+ de = intel_engine_get_busy_time(engine, &t[0]);
+ udelay(100);
+ de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
+ preempt_enable();
+ dt = ktime_sub(t[1], t[0]);
+ if (100 * de < 95 * dt || 95 * de > 100 * dt) {
+ pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
+ engine->name,
+ de, (int)div64_u64(100 * de, dt), dt);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ goto end;
+ }
+
+end:
+ st_engine_heartbeat_enable(engine);
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ return err;
+}
static int live_engine_pm(void *arg)
{
@@ -77,6 +177,7 @@ static int live_engine_pm(void *arg)
int live_engine_pm_selftests(struct intel_gt *gt)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_busy_stats),
SUBTEST(live_engine_pm),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 242181a5214c..6180a47c1b51 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -5,10 +5,141 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/sort.h>
+
+#include "intel_gt_clock_utils.h"
+
#include "selftest_llc.h"
#include "selftest_rc6.h"
#include "selftest_rps.h"
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
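+/*
+ * Sample RING_TIMESTAMP against ktime over five ~1ms windows with
+ * preemption disabled, then combine the middle samples of each to
+ * reject outliers.
+ */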
+static void measure_clocks(struct intel_engine_cs *engine,
+ u32 *out_cycles, ktime_t *out_dt)
+{
+ ktime_t dt[5];
+ u32 cycles[5];
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ preempt_disable();
+ cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP);
+ dt[i] = ktime_get();
+
+ udelay(1000);
+
+ dt[i] = ktime_sub(ktime_get(), dt[i]);
+ cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP);
+ preempt_enable();
+ }
+
+ /* Use the median of both cycle/dt; close enough */
+ sort(cycles, 5, sizeof(*cycles), cmp_u32, NULL);
+ *out_cycles = (cycles[1] + 2 * cycles[2] + cycles[3]) / 4;
+
+ sort(dt, 5, sizeof(*dt), cmp_u64, NULL);
+ *out_dt = div_u64(dt[1] + 2 * dt[2] + dt[3], 4);
+}
+
+static int live_gt_clocks(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
+ pr_info("CS_TIMESTAMP frequency unknown\n");
+ return 0;
+ }
+
+ if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */
+ return 0;
+
+ if (IS_GEN(gt->i915, 5))
+ /*
+ * XXX CS_TIMESTAMP low dword is dysfunctional?
+ *
+ * Ville's experiments indicate the high dword still works,
+ * but at a correspondingly reduced frequency.
+ */
+ return 0;
+
+ if (IS_GEN(gt->i915, 4))
+ /*
+ * XXX CS_TIMESTAMP appears gibberish
+ *
+ * Ville's experiments indicate that it mostly appears 'stuck'
+ * in that we see the register report the same cycle count
+ * for a couple of reads.
+ */
+ return 0;
+
+ intel_gt_pm_get(gt);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ for_each_engine(engine, gt, id) {
+ u32 cycles;
+ u32 expected;
+ u64 time;
+ u64 dt;
+
+ if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0)
+ continue;
+
+ measure_clocks(engine, &cycles, &dt);
+
+ time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+ expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
+
+ pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
+ engine->name, cycles, time, dt, expected,
+ RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
+
+ if (9 * time < 8 * dt || 8 * time > 9 * dt) {
+ pr_err("%s: CS ticks did not match walltime!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (9 * expected < 8 * cycles || 8 * expected > 9 * cycles) {
+ pr_err("%s: walltime did not match CS ticks!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_pm_put(gt);
+
+ return err;
+}
+
static int live_gt_resume(void *arg)
{
struct intel_gt *gt = arg;
@@ -52,6 +183,7 @@ static int live_gt_resume(void *arg)
int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_gt_clocks),
SUBTEST(live_rc6_manual),
SUBTEST(live_rps_clock_interval),
SUBTEST(live_rps_control),
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 4aa4cc917d8b..fb5ebf930ab2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -29,6 +29,7 @@
#include "intel_gt.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "selftest_engine_heartbeat.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -203,12 +204,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
@@ -217,12 +218,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(gt->i915) >= 4) {
@@ -230,24 +231,24 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
- *batch++ = MI_ARB_CHECK;
+ *batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
}
@@ -310,22 +311,6 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
1000));
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine)
-{
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms =
- engine->defaults.heartbeat_interval_ms;
-}
-
static int igt_hang_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@@ -482,7 +467,7 @@ static int igt_reset_nop_engine(void *arg)
reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@@ -499,6 +484,20 @@ static int igt_reset_nop_engine(void *arg)
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+
err = PTR_ERR(rq);
break;
}
@@ -526,7 +525,7 @@ static int igt_reset_nop_engine(void *arg)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -576,7 +575,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
@@ -628,7 +627,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
@@ -805,10 +804,10 @@ static int __igt_reset_engines(struct intel_gt *gt,
threads[tmp].resets =
i915_reset_engine_count(global, other);
- if (!(flags & TEST_OTHERS))
+ if (other == engine && !(flags & TEST_SELF))
continue;
- if (other == engine && !(flags & TEST_SELF))
+ if (other != engine && !(flags & TEST_OTHERS))
continue;
threads[tmp].engine = other;
@@ -827,7 +826,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
yield(); /* start all threads before we begin */
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
@@ -866,13 +865,29 @@ static int __igt_reset_engines(struct intel_gt *gt,
count++;
if (rq) {
+ if (rq->fence.error != -EIO) {
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to reset request %llx:%lld\n",
+ engine->name, test_name,
+ rq->fence.context,
+ rq->fence.seqno);
+ i915_request_put(rq);
+
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ break;
+ }
+
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
- " failed to complete request after reset\n",
- engine->name, test_name);
+ " failed to complete request %llx:%lld after reset\n",
+ engine->name, test_name,
+ rq->fence.context,
+ rq->fence.seqno);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
@@ -901,7 +916,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
@@ -983,7 +998,7 @@ static int igt_reset_engines(void *arg)
},
{
"self-priority",
- TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
+ TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
},
{ }
};
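The local engine_heartbeat_disable()/engine_heartbeat_enable() pairs deleted from this and the following selftest files are replaced by shared st_engine_heartbeat_disable()/st_engine_heartbeat_enable() helpers from gt/selftest_engine_heartbeat.h. Judging by the identical local copies being removed here, the shared versions are expected to keep the same shape; a minimal sketch, assuming the consolidation changes nothing but the location:

void st_engine_heartbeat_disable(struct intel_engine_cs *engine)
{
	/* Keep the heartbeat from interfering while a test owns the engine */
	engine->props.heartbeat_interval_ms = 0;

	intel_engine_pm_get(engine);
	intel_engine_park_heartbeat(engine);
}

void st_engine_heartbeat_enable(struct intel_engine_cs *engine)
{
	intel_engine_pm_put(engine);

	/* Restore the default heartbeat interval once the test is done */
	engine->props.heartbeat_interval_ms =
		engine->defaults.heartbeat_interval_ms;
}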
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 924bc01ef526..3fc5de961280 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
+#include "gt/selftest_engine_heartbeat.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -51,22 +52,6 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
return vma;
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine)
-{
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms =
- engine->defaults.heartbeat_interval_ms;
-}
-
static bool is_active(struct i915_request *rq)
{
if (i915_request_is_active(rq))
@@ -75,7 +60,7 @@ static bool is_active(struct i915_request *rq)
if (i915_request_on_hold(rq))
return true;
- if (i915_request_started(rq))
+ if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
return true;
return false;
@@ -234,7 +219,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
err = -EIO;
break;
}
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
@@ -332,7 +317,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
i915_request_put(rq[0]);
err_ce:
- tasklet_kill(&engine->execlists.tasklet); /* flush submission */
+ intel_engine_flush_submission(engine);
igt_spinner_end(&spin);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
if (IS_ERR_OR_NULL(ce[n]))
@@ -342,7 +327,7 @@ err_ce:
intel_context_put(ce[n]);
}
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
@@ -363,6 +348,156 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
+static int live_unlite_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Setup a preemption event that will cause almost the entire ring
+ * to be unwound, potentially fooling our intel_ring_direction()
+ * into emitting a forward lite-restore instead of the rollback.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ /* Create max prio spinner, followed by N low prio nops */
+ rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring, until we will cause a wrap */
+ n = 0;
+ while (intel_ring_direction(ce[0]->ring,
+ rq->wa_tail,
+ ce[0]->ring->tail) <= 0) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
+ rq->tail,
+ ce[0]->ring->tail) <= 0);
+ i915_request_put(rq);
+
+ /* Create a second ring to preempt the first ring after rq[0] */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
static int live_pin_rewind(void *arg)
{
struct intel_gt *gt = arg;
@@ -471,7 +606,7 @@ static int live_hold_reset(void *arg)
break;
}
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -531,7 +666,7 @@ static int live_hold_reset(void *arg)
i915_request_put(rq);
out:
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
intel_context_put(ce);
if (err)
break;
@@ -578,7 +713,7 @@ static int live_error_interrupt(void *arg)
const struct error_phase *p;
int err = 0;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
for (p = phases; p->error[0] != GOOD; p++) {
struct i915_request *client[ARRAY_SIZE(phases->error)];
@@ -677,7 +812,7 @@ out:
}
}
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err) {
intel_gt_set_wedged(gt);
return err;
@@ -828,7 +963,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
goto out;
if (i915_request_wait(head, 0,
- 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
+ 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
count, n);
GEM_TRACE_DUMP();
@@ -845,10 +980,11 @@ static int live_timeslice_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct i915_vma *vma;
void *vaddr;
int err = 0;
- int count;
/*
* If a request takes too long, we would like to give other users
@@ -885,26 +1021,21 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_pin;
- for_each_prime_number_from(count, 1, 16) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, gt, id) {
- if (!intel_engine_has_preemption(engine))
- continue;
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
- memset(vaddr, 0, PAGE_SIZE);
+ memset(vaddr, 0, PAGE_SIZE);
- engine_heartbeat_disable(engine);
- err = slice_semaphore_queue(engine, vma, count);
- engine_heartbeat_enable(engine);
- if (err)
- goto err_pin;
+ st_engine_heartbeat_disable(engine);
+ err = slice_semaphore_queue(engine, vma, 5);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto err_pin;
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_pin;
- }
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto err_pin;
}
}
@@ -1020,7 +1151,7 @@ static int live_timeslice_rewind(void *arg)
* Expect execution/evaluation order XZY
*/
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
slot = memset32(engine->status_page.addr + 1000, 0, 4);
@@ -1031,18 +1162,18 @@ static int live_timeslice_rewind(void *arg)
goto err;
}
- rq[0] = create_rewinder(ce, NULL, slot, X);
- if (IS_ERR(rq[0])) {
+ rq[A1] = create_rewinder(ce, NULL, slot, X);
+ if (IS_ERR(rq[A1])) {
intel_context_put(ce);
goto err;
}
- rq[1] = create_rewinder(ce, NULL, slot, Y);
+ rq[A2] = create_rewinder(ce, NULL, slot, Y);
intel_context_put(ce);
- if (IS_ERR(rq[1]))
+ if (IS_ERR(rq[A2]))
goto err;
- err = wait_for_submit(engine, rq[1], HZ / 2);
+ err = wait_for_submit(engine, rq[A2], HZ / 2);
if (err) {
pr_err("%s: failed to submit first context\n",
engine->name);
@@ -1055,12 +1186,12 @@ static int live_timeslice_rewind(void *arg)
goto err;
}
- rq[2] = create_rewinder(ce, rq[0], slot, Z);
+ rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
intel_context_put(ce);
if (IS_ERR(rq[2]))
goto err;
- err = wait_for_submit(engine, rq[2], HZ / 2);
+ err = wait_for_submit(engine, rq[B1], HZ / 2);
if (err) {
pr_err("%s: failed to submit second context\n",
engine->name);
@@ -1068,6 +1199,7 @@ static int live_timeslice_rewind(void *arg)
}
/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
/* Wait for the timeslice to kick in */
del_timer(&engine->execlists.timer);
@@ -1114,7 +1246,7 @@ err:
wmb();
engine->props.timeslice_duration_ms = timeslice;
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
for (i = 0; i < 3; i++)
i915_request_put(rq[i]);
if (igt_flush_test(gt->i915))
@@ -1140,9 +1272,17 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
return rq;
}
-static long timeslice_threshold(const struct intel_engine_cs *engine)
+static long slice_timeout(struct intel_engine_cs *engine)
{
- return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1;
+ long timeout;
+
+ /* Enough time for a timeslice to kick in, and kick out */
+ timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
+
+ /* Enough time for the nop request to complete */
+ timeout += HZ / 5;
+
+ return timeout + 1;
}
static int live_timeslice_queue(void *arg)
@@ -1198,7 +1338,7 @@ static int live_timeslice_queue(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
memset(vaddr, 0, PAGE_SIZE);
/* ELSP[0]: semaphore wait */
@@ -1243,24 +1383,8 @@ static int live_timeslice_queue(void *arg)
intel_engine_flush_submission(engine);
} while (READ_ONCE(engine->execlists.pending[0]));
- if (!READ_ONCE(engine->execlists.timer.expires) &&
- execlists_active(&engine->execlists) == rq &&
- !i915_request_completed(rq)) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
- GEM_TRACE_DUMP();
-
- memset(vaddr, 0xff, PAGE_SIZE);
- err = -EINVAL;
- }
-
/* Timeslice every jiffy, so within 2 we should signal */
- if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) {
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
@@ -1275,7 +1399,7 @@ static int live_timeslice_queue(void *arg)
err_rq:
i915_request_put(rq);
err_heartbeat:
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
}
@@ -1321,7 +1445,7 @@ static int live_timeslice_nopreempt(void *arg)
break;
}
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
/* Create an unpreemptible spinner */
@@ -1349,7 +1473,7 @@ static int live_timeslice_nopreempt(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
- err = PTR_ERR(rq);
+ err = PTR_ERR(ce);
goto out_spin;
}
@@ -1379,7 +1503,7 @@ static int live_timeslice_nopreempt(void *arg)
* allow the maximum priority barrier through. Wait long
* enough to see if it is timesliced in by mistake.
*/
- if (i915_request_wait(rq, 0, timeslice_threshold(engine)) >= 0) {
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
engine->name);
err = -EINVAL;
@@ -1390,7 +1514,7 @@ out_spin:
igt_spinner_end(&spin);
out_heartbeat:
xchg(&engine->props.timeslice_duration_ms, timeslice);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
@@ -2294,7 +2418,7 @@ static int live_suppress_self_preempt(void *arg)
if (igt_flush_test(gt->i915))
goto err_wedged;
- intel_engine_pm_get(engine);
+ st_engine_heartbeat_disable(engine);
engine->execlists.preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
@@ -2302,14 +2426,14 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
goto err_client_b;
}
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
goto err_wedged;
}
@@ -2321,7 +2445,7 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
goto err_client_b;
}
i915_request_add(rq_b);
@@ -2332,7 +2456,7 @@ static int live_suppress_self_preempt(void *arg)
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
goto err_wedged;
}
@@ -2346,12 +2470,12 @@ static int live_suppress_self_preempt(void *arg)
engine->name,
engine->execlists.preempt_hang.count,
depth);
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
err = -EINVAL;
goto err_client_b;
}
- intel_engine_pm_put(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -2371,183 +2495,6 @@ err_wedged:
goto err_client_b;
}
-static int __i915_sw_fence_call
-dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
- return NOTIFY_DONE;
-}
-
-static struct i915_request *dummy_request(struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
-
- rq = kzalloc(sizeof(*rq), GFP_KERNEL);
- if (!rq)
- return NULL;
-
- rq->engine = engine;
-
- spin_lock_init(&rq->lock);
- INIT_LIST_HEAD(&rq->fence.cb_list);
- rq->fence.lock = &rq->lock;
- rq->fence.ops = &i915_fence_ops;
-
- i915_sched_node_init(&rq->sched);
-
- /* mark this request as permanently incomplete */
- rq->fence.seqno = 1;
- BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
- rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
- GEM_BUG_ON(i915_request_completed(rq));
-
- i915_sw_fence_init(&rq->submit, dummy_notify);
- set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
-
- spin_lock_init(&rq->lock);
- rq->fence.lock = &rq->lock;
- INIT_LIST_HEAD(&rq->fence.cb_list);
-
- return rq;
-}
-
-static void dummy_request_free(struct i915_request *dummy)
-{
- /* We have to fake the CS interrupt to kick the next request */
- i915_sw_fence_commit(&dummy->submit);
-
- i915_request_mark_complete(dummy);
- dma_fence_signal(&dummy->fence);
-
- i915_sched_node_fini(&dummy->sched);
- i915_sw_fence_fini(&dummy->submit);
-
- dma_fence_free(&dummy->fence);
-}
-
-static int live_suppress_wait_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct preempt_client client[4];
- struct i915_request *rq[ARRAY_SIZE(client)] = {};
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
- int i;
-
- /*
- * Waiters are given a little priority nudge, but not enough
- * to actually cause any preemption. Double check that we do
- * not needlessly generate preempt-to-idle cycles.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &client[0])) /* ELSP[0] */
- return -ENOMEM;
- if (preempt_client_init(gt, &client[1])) /* ELSP[1] */
- goto err_client_0;
- if (preempt_client_init(gt, &client[2])) /* head of queue */
- goto err_client_1;
- if (preempt_client_init(gt, &client[3])) /* bystander */
- goto err_client_2;
-
- for_each_engine(engine, gt, id) {
- int depth;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!engine->emit_init_breadcrumb)
- continue;
-
- for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
- struct i915_request *dummy;
-
- engine->execlists.preempt_hang.count = 0;
-
- dummy = dummy_request(engine);
- if (!dummy)
- goto err_client_3;
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_request *this;
-
- this = spinner_create_request(&client[i].spin,
- client[i].ctx, engine,
- MI_NOOP);
- if (IS_ERR(this)) {
- err = PTR_ERR(this);
- goto err_wedged;
- }
-
- /* Disable NEWCLIENT promotion */
- __i915_active_fence_set(&i915_request_timeline(this)->last_request,
- &dummy->fence);
-
- rq[i] = i915_request_get(this);
- i915_request_add(this);
- }
-
- dummy_request_free(dummy);
-
- GEM_BUG_ON(i915_request_completed(rq[0]));
- if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
- pr_err("%s: First client failed to start\n",
- engine->name);
- goto err_wedged;
- }
- GEM_BUG_ON(!i915_request_started(rq[0]));
-
- if (i915_request_wait(rq[depth],
- I915_WAIT_PRIORITY,
- 1) != -ETIME) {
- pr_err("%s: Waiter depth:%d completed!\n",
- engine->name, depth);
- goto err_wedged;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- igt_spinner_end(&client[i].spin);
- i915_request_put(rq[i]);
- rq[i] = NULL;
- }
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
- engine->name,
- engine->execlists.preempt_hang.count,
- depth);
- err = -EINVAL;
- goto err_client_3;
- }
- }
- }
-
- err = 0;
-err_client_3:
- preempt_client_fini(&client[3]);
-err_client_2:
- preempt_client_fini(&client[2]);
-err_client_1:
- preempt_client_fini(&client[1]);
-err_client_0:
- preempt_client_fini(&client[0]);
- return err;
-
-err_wedged:
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- igt_spinner_end(&client[i].spin);
- i915_request_put(rq[i]);
- }
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_3;
-}
-
static int live_chain_preempt(void *arg)
{
struct intel_gt *gt = arg;
@@ -2796,6 +2743,168 @@ err_ce:
return err;
}
+static int __live_preempt_ring(struct intel_engine_cs *engine,
+ struct igt_spinner *spin,
+ int queue_sz, int ring_sz)
+{
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err = 0;
+ int n;
+
+ if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
+ return -EIO;
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ tmp->ring = __intel_context_ring_size(ring_sz);
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring, until we will cause a wrap */
+ n = 0;
+ while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, queue_sz, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ i915_request_put(rq);
+
+ /* Create a second request to preempt the first ring */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submited\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that we rollback large chunks of a ring in order to do a
+ * preemption event. Similar to live_unlite_ring, but looking at
+ * ring size rather than the impact of intel_ring_direction().
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n <= 3; n++) {
+ err = __live_preempt_ring(engine, &spin,
+ n * SZ_4K / 4, SZ_4K);
+ if (err)
+ break;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
static int live_preempt_gang(void *arg)
{
struct intel_gt *gt = arg;
@@ -2840,16 +2949,8 @@ static int live_preempt_gang(void *arg)
/* Submit each spinner at increasing priority */
engine->schedule(rq, &attr);
-
- if (prio <= I915_PRIORITY_MAX)
- continue;
-
- if (prio > (INT_MAX >> I915_USER_PRIORITY_SHIFT))
- break;
-
- if (__igt_timeout(end_time, NULL))
- break;
- } while (1);
+ } while (prio <= I915_PRIORITY_MAX &&
+ !__igt_timeout(end_time, NULL));
pr_debug("%s: Preempt chain of %d requests\n",
engine->name, prio);
@@ -3417,7 +3518,7 @@ static int smoke_crescendo_thread(void *arg)
return err;
count++;
- } while (!__igt_timeout(end_time, NULL));
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
smoke->count = count;
return 0;
@@ -3468,8 +3569,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
}
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
- count, flags,
- RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
return 0;
}
@@ -3493,11 +3593,10 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
count++;
}
- } while (!__igt_timeout(end_time, NULL));
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
- count, flags,
- RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
return 0;
}
@@ -3506,7 +3605,7 @@ static int live_preempt_smoke(void *arg)
struct preempt_smoke smoke = {
.gt = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
- .ncontext = 1024,
+ .ncontext = 256,
};
const unsigned int phase[] = { 0, BATCH };
struct igt_live_test t;
@@ -3706,13 +3805,43 @@ out:
return err;
}
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ bool (*filter)(const struct intel_engine_cs *))
+{
+ unsigned int n = 0;
+ unsigned int inst;
+
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ if (filter && !filter(gt->engine_class[class][inst]))
+ continue;
+
+ siblings[n++] = gt->engine_class[class][inst];
+ }
+
+ return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings)
+{
+ return __select_siblings(gt, class, siblings, NULL);
+}
+
static int live_virtual_engine(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int class, inst;
+ unsigned int class;
int err;
if (intel_uc_uses_guc_submission(&gt->uc))
@@ -3730,13 +3859,7 @@ static int live_virtual_engine(void *arg)
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, n;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- siblings[nsibling++] = gt->engine_class[class][inst];
- }
+ nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
@@ -3845,7 +3968,7 @@ static int live_virtual_mask(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class, inst;
+ unsigned int class;
int err;
if (intel_uc_uses_guc_submission(&gt->uc))
@@ -3854,17 +3977,178 @@ static int live_virtual_mask(void *arg)
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
unsigned int nsibling;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- break;
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must take part in timeslicing on the target engines.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+ __func__, rq->engine->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must allow others a fair timeslice.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ /* XXX We do not handle oversubscription and fairness with normal rq */
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ for (n = 0; !err && n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
- siblings[nsibling++] = gt->engine_class[class][inst];
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+ __func__, siblings[n]->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
}
+ i915_request_put(rq);
+ }
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = __select_siblings(gt, class, siblings,
+ intel_engine_has_timeslices);
if (nsibling < 2)
continue;
- err = mask_virtual_engine(gt, siblings, nsibling);
+ err = slicein_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+
+ err = sliceout_virtual_engine(gt, siblings, nsibling);
if (err)
return err;
}
@@ -3982,7 +4266,7 @@ static int live_virtual_preserved(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class, inst;
+ unsigned int class;
/*
* Check that the context image retains non-privileged (user) registers
@@ -4000,13 +4284,7 @@ static int live_virtual_preserved(void *arg)
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, err;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- siblings[nsibling++] = gt->engine_class[class][inst];
- }
+ nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
@@ -4217,7 +4495,7 @@ static int live_virtual_bond(void *arg)
};
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class, inst;
+ unsigned int class;
int err;
if (intel_uc_uses_guc_submission(&gt->uc))
@@ -4227,14 +4505,7 @@ static int live_virtual_bond(void *arg)
const struct phase *p;
int nsibling;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- break;
-
- GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
- siblings[nsibling++] = gt->engine_class[class][inst];
- }
+ nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
@@ -4280,7 +4551,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
}
for (n = 0; n < nsibling; n++)
- engine_heartbeat_disable(siblings[n]);
+ st_engine_heartbeat_disable(siblings[n]);
rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -4351,7 +4622,7 @@ out_rq:
i915_request_put(rq);
out_heartbeat:
for (n = 0; n < nsibling; n++)
- engine_heartbeat_enable(siblings[n]);
+ st_engine_heartbeat_enable(siblings[n]);
intel_context_put(ve);
out_spin:
@@ -4363,7 +4634,7 @@ static int live_virtual_reset(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class, inst;
+ unsigned int class;
/*
* Check that we handle a reset event within a virtual engine.
@@ -4381,13 +4652,7 @@ static int live_virtual_reset(void *arg)
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, err;
- nsibling = 0;
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- siblings[nsibling++] = gt->engine_class[class][inst];
- }
+ nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
@@ -4405,6 +4670,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
+ SUBTEST(live_unlite_ring),
SUBTEST(live_pin_rewind),
SUBTEST(live_hold_reset),
SUBTEST(live_error_interrupt),
@@ -4418,8 +4684,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_nopreempt),
SUBTEST(live_preempt_cancel),
SUBTEST(live_suppress_self_preempt),
- SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_ring),
SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_user),
@@ -4427,6 +4693,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
+ SUBTEST(live_virtual_slice),
SUBTEST(live_virtual_bond),
SUBTEST(live_virtual_reset),
};
@@ -5030,7 +5297,7 @@ static int live_lrc_gpr(void *arg)
return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
err = __live_lrc_gpr(engine, scratch, false);
if (err)
@@ -5041,7 +5308,7 @@ static int live_lrc_gpr(void *arg)
goto err;
err:
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -5190,7 +5457,7 @@ static int live_lrc_timestamp(void *arg)
for_each_engine(data.engine, gt, id) {
int i, err = 0;
- engine_heartbeat_disable(data.engine);
+ st_engine_heartbeat_disable(data.engine);
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
struct intel_context *tmp;
@@ -5223,7 +5490,7 @@ static int live_lrc_timestamp(void *arg)
}
err:
- engine_heartbeat_enable(data.engine);
+ st_engine_heartbeat_enable(data.engine);
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
if (!data.ce[i])
break;
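live_unlite_ring() fills a context's ring until intel_ring_direction() would report the tail as having moved behind the spinner's wa_tail, while __live_preempt_ring() fills a growing fraction of a small ring; both then check that a maximum-priority request still preempts cleanly, i.e. that the driver rolls the ring back rather than treating the unwind as a forward lite-restore. The underlying question is whether one ring offset lies ahead of or behind another on a circular buffer. A stand-alone model of that comparison, for illustration only (intel_ring_direction() in the driver is the authoritative implementation and is not reproduced here):

#include <stdio.h>

/*
 * Toy direction check on a circular buffer: report whether 'to' sits
 * ahead of (+1) or behind (-1) 'from', taking the shorter way around.
 * Assumes 'size' is a power of two, as the hardware rings are.
 */
static int ring_direction(unsigned int size, unsigned int from, unsigned int to)
{
	unsigned int delta = (to - from) & (size - 1);

	if (delta == 0)
		return 0;

	return delta < size / 2 ? 1 : -1;
}

int main(void)
{
	const unsigned int size = 4096;

	printf("%d\n", ring_direction(size, 0x100, 0x200)); /* ahead: 1 */
	printf("%d\n", ring_direction(size, 0x200, 0x100)); /* behind: -1 */
	return 0;
}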
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index 63f87d8608c3..b25eba50c88e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -157,7 +157,7 @@ static int read_mocs_table(struct i915_request *rq,
{
u32 addr;
- if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
addr = global_mocs_offset();
else
addr = mocs_offset(rq->engine);
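This and several of the following selftest hunks (selftest_rc6.c, selftest_timeline.c) stop dereferencing rq->i915 and instead reach the device through the engine the request was built for. The idiom the patch settles on is simply rq->engine->i915; a named helper for it would look like the sketch below (the helper name is illustrative, the driver open-codes the chain):

static inline struct drm_i915_private *request_to_i915(const struct i915_request *rq)
{
	/* A request always belongs to an engine, and the engine knows its device */
	return rq->engine->i915;
}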
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 2dc460624bbc..64ef5ee5decf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -132,7 +132,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
}
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
@@ -197,10 +197,10 @@ int live_rc6_ctx_wa(void *arg)
int pass;
for (pass = 0; pass < 2; pass++) {
+ struct i915_gpu_error *error = &gt->i915->gpu_error;
struct intel_context *ce;
unsigned int resets =
- i915_reset_engine_count(&gt->i915->gpu_error,
- engine);
+ i915_reset_engine_count(error, engine);
const u32 *res;
/* Use a sacrificial context */
@@ -230,11 +230,10 @@ int live_rc6_ctx_wa(void *arg)
engine->name, READ_ONCE(*res));
if (resets !=
- i915_reset_engine_count(&gt->i915->gpu_error,
- engine)) {
+ i915_reset_engine_count(error, engine)) {
pr_err("%s: GPU reset required\n",
engine->name);
- add_taint_for_CI(TAINT_WARN);
+ add_taint_for_CI(gt->i915, TAINT_WARN);
err = -EIO;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index c91981e75ebf..8624f5d2a1f3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -12,6 +12,7 @@
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
+#include "selftest_engine_heartbeat.h"
#include "selftest_rps.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
@@ -20,22 +21,6 @@
/* Try to isolate the impact of cstates from determining frequency response */
#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
-static void engine_heartbeat_disable(struct intel_engine_cs *engine)
-{
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms =
- engine->defaults.heartbeat_interval_ms;
-}
-
static void dummy_rps_work(struct work_struct *wrk)
{
}
@@ -249,13 +234,13 @@ int live_rps_clock_interval(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
err = PTR_ERR(rq);
break;
}
@@ -266,7 +251,7 @@ int live_rps_clock_interval(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -322,7 +307,7 @@ int live_rps_clock_interval(void *arg)
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err == 0) {
u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
@@ -408,7 +393,7 @@ int live_rps_control(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
@@ -424,7 +409,7 @@ int live_rps_control(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -434,7 +419,7 @@ int live_rps_control(void *arg)
pr_err("%s: could not set minimum frequency [%x], only %x!\n",
engine->name, rps->min_freq, read_cagf(rps));
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
show_pstate_limits(rps);
err = -EINVAL;
break;
@@ -451,7 +436,7 @@ int live_rps_control(void *arg)
pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
engine->name, rps->min_freq, read_cagf(rps));
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
show_pstate_limits(rps);
err = -EINVAL;
break;
@@ -466,7 +451,7 @@ int live_rps_control(void *arg)
min_dt = ktime_sub(ktime_get(), min_dt);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
engine->name,
@@ -637,14 +622,14 @@ int live_rps_frequency_cs(void *arg)
int freq;
} min, max;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
vma = create_spin_counter(engine,
engine->kernel_context->vm, false,
&cancel, &cntr);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
break;
}
@@ -725,7 +710,7 @@ err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -779,14 +764,14 @@ int live_rps_frequency_srm(void *arg)
int freq;
} min, max;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
vma = create_spin_counter(engine,
engine->kernel_context->vm, true,
&cancel, &cntr);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
break;
}
@@ -866,7 +851,7 @@ err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -1061,11 +1046,11 @@ int live_rps_interrupt(void *arg)
intel_gt_pm_wait_for_idle(engine->gt);
GEM_BUG_ON(intel_rps_is_active(rps));
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
err = __rps_up_interrupt(rps, engine, &spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
goto out;
@@ -1074,13 +1059,13 @@ int live_rps_interrupt(void *arg)
/* Keep the engine awake but idle and check for DOWN */
if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
intel_rc6_disable(&gt->rc6);
err = __rps_down_interrupt(rps, engine);
intel_rc6_enable(&gt->rc6);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
goto out;
}
@@ -1165,13 +1150,13 @@ int live_rps_power(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
err = PTR_ERR(rq);
break;
}
@@ -1182,7 +1167,7 @@ int live_rps_power(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -1195,7 +1180,7 @@ int live_rps_power(void *arg)
min.power = measure_power_at(rps, &min.freq);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
engine->name,
@@ -1252,6 +1237,11 @@ int live_rps_dynamic(void *arg)
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
+ if (intel_rps_has_interrupts(rps))
+ pr_info("RPS has interrupt support\n");
+ if (intel_rps_uses_timer(rps))
+ pr_info("RPS has timer support\n");
+
for_each_engine(engine, gt, id) {
struct i915_request *rq;
struct {
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index ef1c35073dc0..fb5b7d3498a6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -12,6 +12,7 @@
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
#include "../selftests/i915_random.h"
#include "../i915_selftest.h"
@@ -426,12 +427,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (INTEL_GEN(rq->i915) >= 8) {
+ if (INTEL_GEN(rq->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = 0;
*cs++ = value;
- } else if (INTEL_GEN(rq->i915) >= 4) {
+ } else if (INTEL_GEN(rq->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = addr;
@@ -561,8 +562,9 @@ static int live_hwsp_engine(void *arg)
struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
+ pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+ n, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}
intel_timeline_put(tl);
@@ -632,8 +634,9 @@ out:
struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
+ pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
+ n, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}
intel_timeline_put(tl);
@@ -751,22 +754,6 @@ out_free:
return err;
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine)
-{
- engine->props.heartbeat_interval_ms = 0;
-
- intel_engine_pm_get(engine);
- intel_engine_park_heartbeat(engine);
-}
-
-static void engine_heartbeat_enable(struct intel_engine_cs *engine)
-{
- intel_engine_pm_put(engine);
-
- engine->props.heartbeat_interval_ms =
- engine->defaults.heartbeat_interval_ms;
-}
-
static int live_hwsp_rollover_kernel(void *arg)
{
struct intel_gt *gt = arg;
@@ -785,7 +772,7 @@ static int live_hwsp_rollover_kernel(void *arg)
struct i915_request *rq[3] = {};
int i;
- engine_heartbeat_disable(engine);
+ st_engine_heartbeat_disable(engine);
if (intel_gt_wait_for_idle(gt, HZ / 2)) {
err = -EIO;
goto out;
@@ -836,7 +823,7 @@ static int live_hwsp_rollover_kernel(void *arg)
out:
for (i = 0; i < ARRAY_SIZE(rq); i++)
i915_request_put(rq[i]);
- engine_heartbeat_enable(engine);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
}
@@ -980,8 +967,9 @@ static int live_hwsp_recycle(void *arg)
}
if (*tl->hwsp_seqno != count) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+ pr_err("Invalid seqno stored in timeline %lu @ tl->hwsp_offset, found 0x%x\n",
count, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 32785463ec9e..febc9e6692ba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -417,6 +417,20 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
return false;
}
+static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
+{
+ reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
+ switch (reg) {
+ case 0x358:
+ case 0x35c:
+ case 0x3a8:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static bool ro_register(u32 reg)
{
if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
@@ -497,6 +511,9 @@ static int check_dirty_whitelist(struct intel_context *ce)
if (wo_register(engine, reg))
continue;
+ if (timestamp(engine, reg))
+ continue; /* timestamps are expected to autoincrement */
+
ro_reg = ro_register(reg);
/* Clear non priv flags */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 101728006ae9..d44061033f23 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -67,7 +67,7 @@ struct __guc_ads_blob {
static void __guc_ads_init(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct intel_gt *gt = guc_to_gt(guc);
struct __guc_ads_blob *blob = guc->ads_blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
@@ -99,13 +99,13 @@ static void __guc_ads_init(struct intel_guc *guc)
}
/* System info */
- blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+ blob->system_info.slice_enabled = hweight8(gt->info.sseu.slice_mask);
blob->system_info.rcs_enabled = 1;
blob->system_info.bcs_enabled = 1;
- blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
- blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
- blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt);
+ blob->system_info.vebox_enable_mask = VEBOX_MASK(gt);
+ blob->system_info.vdbox_sfc_support_mask = gt->info.vdbox_sfc_access;
base = intel_guc_ggtt_offset(guc, guc->ads_vma);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index fb10f3597ea5..9bbe8a795cb8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -424,25 +424,28 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
static u32 __get_default_log_level(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
/* A negative value means "use platform/config default" */
- if (i915_modparams.guc_log_level < 0) {
+ if (i915->params.guc_log_level < 0) {
return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
}
- if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
+ if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915_modparams.guc_log_level,
+ "guc_log_level", i915->params.guc_log_level,
"verbosity too high");
return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
}
- GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
- GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
- return i915_modparams.guc_log_level;
+ GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
+ GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
+ return i915->params.guc_log_level;
}
int intel_guc_log_create(struct intel_guc_log *log)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 94eb63f309ce..fdfeb4b9b0f5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -660,10 +660,12 @@ void intel_guc_submission_disable(struct intel_guc *guc)
static bool __guc_submission_selected(struct intel_guc *guc)
{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
if (!intel_guc_submission_is_supported(guc))
return false;
- return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+ return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
}
void intel_guc_submission_init_early(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index f518fe05c6f9..d6f55f70889d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -47,15 +47,15 @@ static void __confirm_options(struct intel_uc *uc)
drm_dbg(&i915->drm,
"enable_guc=%d (guc:%s submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
+ i915->params.enable_guc,
yesno(intel_uc_wants_guc(uc)),
yesno(intel_uc_wants_guc_submission(uc)),
yesno(intel_uc_wants_huc(uc)));
- if (i915_modparams.enable_guc == -1)
+ if (i915->params.enable_guc == -1)
return;
- if (i915_modparams.enable_guc == 0) {
+ if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
GEM_BUG_ON(intel_uc_wants_huc(uc));
@@ -65,25 +65,25 @@ static void __confirm_options(struct intel_uc *uc)
if (!intel_uc_supports_guc(uc))
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
- i915_modparams.enable_guc, "GuC is not supported!");
+ i915->params.enable_guc, "GuC is not supported!");
- if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
+ if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
- i915_modparams.enable_guc, "HuC is not supported!");
+ i915->params.enable_guc, "HuC is not supported!");
- if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
+ if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
- i915_modparams.enable_guc, "GuC submission is N/A");
+ i915->params.enable_guc, "GuC submission is N/A");
- if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
+ if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION |
ENABLE_GUC_LOAD_HUC))
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
- i915_modparams.enable_guc, "undocumented flag");
+ i915->params.enable_guc, "undocumented flag");
}
void intel_uc_init_early(struct intel_uc *uc)
@@ -267,8 +267,17 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
GEM_BUG_ON(!intel_uc_wants_guc(uc));
err = intel_uc_fw_fetch(&uc->guc.fw);
- if (err)
+ if (err) {
+ /* Make sure we transition out of transient "SELECTED" state */
+ if (intel_uc_wants_huc(uc)) {
+ drm_dbg(&uc_to_gt(uc)->i915->drm,
+ "Failed to fetch GuC: %d disabling HuC\n", err);
+ intel_uc_fw_change_status(&uc->huc.fw,
+ INTEL_UC_FIRMWARE_ERROR);
+ }
+
return;
+ }
if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
index 9d16b784aa0d..089d98662f46 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -4,14 +4,41 @@
*/
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+#include "gt/debugfs_gt.h"
#include "intel_guc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
#include "intel_uc_debugfs.h"
+static int uc_usage_show(struct seq_file *m, void *data)
+{
+ struct intel_uc *uc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
+ yesno(intel_uc_supports_guc(uc)),
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_uses_guc(uc)));
+ drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
+ yesno(intel_uc_supports_huc(uc)),
+ yesno(intel_uc_wants_huc(uc)),
+ yesno(intel_uc_uses_huc(uc)));
+ drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
+ yesno(intel_uc_supports_guc_submission(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_uses_guc_submission(uc)));
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(uc_usage);
+
void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
{
+ static const struct debugfs_gt_file files[] = {
+ { "usage", &uc_usage_fops, NULL },
+ };
struct dentry *root;
if (!gt_root)
@@ -25,6 +52,8 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
if (IS_ERR(root))
return;
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
+
intel_guc_debugfs_register(&uc->guc, root);
intel_huc_debugfs_register(&uc->huc, root);
}
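The new per-gt "usage" entry summarises each firmware component on one line, using the yesno() formatting shown in uc_usage_show() above. On a part where GuC and HuC firmware load but GuC submission stays off, reading the file (it lives under the gt/uc debugfs directory; the exact mount path depends on the system) would be expected to produce output along these lines — the yes/no values here are purely illustrative:

[guc] supported:yes wanted:yes used:yes
[huc] supported:yes wanted:yes used:yes
[submission] supported:yes wanted:no used:no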
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index e1caae93996d..59b27aba15c6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -47,12 +47,15 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas
* between 33.0 and 35.2 are only related to new additions to support new Gen12
* features.
+ *
+ * Note that RKL uses the same firmware as TGL.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
- fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
@@ -112,11 +115,13 @@ struct __packed uc_fw_platform_requirement {
},
static void
-__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
+__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
static const struct uc_fw_platform_requirement fw_blobs[] = {
INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
};
+ enum intel_platform p = INTEL_INFO(i915)->platform;
+ u8 rev = INTEL_REVID(i915);
int i;
for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
@@ -151,35 +156,35 @@ __uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
}
/* We don't want to enable GuC/HuC on pre-Gen11 by default */
- if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE)
+ if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE)
uc_fw->path = NULL;
}
-static const char *__override_guc_firmware_path(void)
+static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
- if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
- return i915_modparams.guc_firmware_path;
+ if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION |
+ ENABLE_GUC_LOAD_HUC))
+ return i915->params.guc_firmware_path;
return "";
}
-static const char *__override_huc_firmware_path(void)
+static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
{
- if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC)
- return i915_modparams.huc_firmware_path;
+ if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
+ return i915->params.huc_firmware_path;
return "";
}
-static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
+static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
const char *path = NULL;
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_GUC:
- path = __override_guc_firmware_path();
+ path = __override_guc_firmware_path(i915);
break;
case INTEL_UC_FW_TYPE_HUC:
- path = __override_huc_firmware_path();
+ path = __override_huc_firmware_path(i915);
break;
}
@@ -213,10 +218,8 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
uc_fw->type = type;
if (HAS_GT_UC(i915)) {
- __uc_fw_auto_select(uc_fw,
- INTEL_INFO(i915)->platform,
- INTEL_REVID(i915));
- __uc_fw_user_override(uc_fw);
+ __uc_fw_auto_select(i915, uc_fw);
+ __uc_fw_user_override(i915, uc_fw);
}
intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
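
Aside: the firmware table above stays sorted from newest to oldest platform, and __uc_fw_auto_select() walks it only while the entry's platform is at least as new as the running one, taking the first exact match. A stripped-down, standalone sketch of that walk follows; the enum values, blob names and the absence of revision handling are simplifications invented for the example, not the real macro-generated table.

#include <stdio.h>

/* Toy ordering assumption: newer platforms get larger enum values. */
enum platform { KABYLAKE = 1, COFFEELAKE, COMETLAKE, ICELAKE, TIGERLAKE, ROCKETLAKE };

struct fw_req {
	enum platform p;
	const char *guc_path;	/* illustrative names only */
};

/* Must stay sorted newest platform first, like INTEL_UC_FIRMWARE_DEFS. */
static const struct fw_req fw_blobs[] = {
	{ ROCKETLAKE, "tgl_guc_35.2.0" },
	{ TIGERLAKE,  "tgl_guc_35.2.0" },
	{ ICELAKE,    "icl_guc_33.0.0" },
	{ COMETLAKE,  "cml_guc_33.0.0" },
	{ COFFEELAKE, "kbl_guc_33.0.0" },
};

static const char *select_guc_fw(enum platform p)
{
	unsigned int i;

	/* Stop once the remaining entries are older than our platform. */
	for (i = 0; i < sizeof(fw_blobs) / sizeof(fw_blobs[0]) && p <= fw_blobs[i].p; i++)
		if (p == fw_blobs[i].p)
			return fw_blobs[i].guc_path;

	return NULL;	/* no blob defined for this platform */
}

int main(void)
{
	printf("RKL -> %s\n", select_guc_fw(ROCKETLAKE));
	printf("CML -> %s\n", select_guc_fw(COMETLAKE));
	return 0;
}
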
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 8b87f130f7f1..f1940939260a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1904,19 +1904,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
goto err_free_bb;
}
- ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
- if (ret)
- goto err_free_obj;
-
bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
if (IS_ERR(bb->va)) {
ret = PTR_ERR(bb->va);
- goto err_finish_shmem_access;
- }
-
- if (bb->clflush & CLFLUSH_BEFORE) {
- drm_clflush_virt_range(bb->va, bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_BEFORE;
+ goto err_free_obj;
}
ret = copy_gma_to_hva(s->vgpu, mm,
@@ -1935,7 +1926,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
- bb->accessing = true;
bb->bb_start_cmd_va = s->ip_va;
if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
@@ -1956,8 +1946,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
return 0;
err_unmap:
i915_gem_object_unpin_map(bb->obj);
-err_finish_shmem_access:
- i915_gem_object_finish_access(bb->obj);
err_free_obj:
i915_gem_object_put(bb->obj);
err_free_bb:
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a1696e9ce4b6..7ba16ddfe75f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -199,8 +199,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -314,8 +316,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv)) &&
+ if ((IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
@@ -498,8 +502,10 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* TODO: add more platforms support */
- if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915)) {
+ if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;
@@ -527,8 +533,10 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -551,8 +559,10 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
intel_vgpu_init_i2c_edid(vgpu);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
resolution);
else
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 37fc460414a8..c3eb3838fe88 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -198,6 +198,7 @@ static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
}
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
+ .name = "i915_gem_object_vgpu",
.flags = I915_GEM_OBJECT_IS_PROXY,
.get_pages = vgpu_gem_get_pages,
.put_pages = vgpu_gem_put_pages,
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 190651df5db1..22247805c345 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -149,7 +149,7 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (IS_BROXTON(i915))
port = bxt_get_port_from_gmbus0(pin_select);
- else if (IS_COFFEELAKE(i915))
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index fadd2adb8030..63bba7b4bb2f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -59,7 +59,7 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_KBL;
else if (IS_BROXTON(i915))
return D_BXT;
- else if (IS_COFFEELAKE(i915))
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
return D_CFL;
return 0;
@@ -347,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
}
- engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
+ engine_mask &= vgpu->gvt->gt->info.engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -1435,7 +1435,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
case GEN9_PCODE_READ_MEM_LATENCY:
if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
IS_KABYLAKE(vgpu->gvt->gt->i915) ||
- IS_COFFEELAKE(vgpu->gvt->gt->i915)) {
+ IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+ IS_COMETLAKE(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1460,7 +1461,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
case SKL_PCODE_CDCLK_CONTROL:
if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
IS_KABYLAKE(vgpu->gvt->gt->i915) ||
- IS_COFFEELAKE(vgpu->gvt->gt->i915))
+ IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+ IS_COMETLAKE(vgpu->gvt->gt->i915))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -1722,7 +1724,8 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int ret;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
- if (IS_COFFEELAKE(vgpu->gvt->gt->i915))
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+ IS_COMETLAKE(vgpu->gvt->gt->i915))
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
@@ -1731,7 +1734,8 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
- if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
+ if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+ IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
IS_MASKED_BITS_ENABLED(data, 2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -1864,7 +1868,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
- if (HAS_ENGINE(dev_priv, VCS1)) \
+ if (HAS_ENGINE(gvt->gt, VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
@@ -3395,7 +3399,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
} else if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915)) {
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 540017fed908..7498878e6289 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -540,7 +540,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
+ if (HAS_ENGINE(gvt->gt, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2ccaf78f96e8..86a60bdf0818 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -171,7 +171,7 @@ static void load_render_mocs(const struct intel_engine_cs *engine)
return;
for (ring_id = 0; ring_id < cnt; ring_id++) {
- if (!HAS_ENGINE(engine->i915, ring_id))
+ if (!HAS_ENGINE(engine->gt, ring_id))
continue;
offset.reg = regs[ring_id];
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0fb1df71c637..3c3b9842bbbd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -348,7 +348,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
u32 *cs;
int err;
- if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
+ if (IS_GEN(req->engine->i915, 9) && is_inhibit_context(req->context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/*
@@ -509,26 +509,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+ bb->bb_offset;
- if (bb->ppgtt) {
- /* for non-priv bb, scan&shadow is only for
- * debugging purpose, so the content of shadow bb
- * is the same as original bb. Therefore,
- * here, rather than switch to shadow bb's gma
- * address, we directly use original batch buffer's
- * gma address, and send original bb to hardware
- * directly
- */
- if (bb->clflush & CLFLUSH_AFTER) {
- drm_clflush_virt_range(bb->va,
- bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_AFTER;
- }
- i915_gem_object_finish_access(bb->obj);
- bb->accessing = false;
-
- } else {
+ /*
+ * For a non-priv bb, scan & shadow is only for
+ * debugging purposes, so the content of the shadow bb
+ * is the same as the original bb. Therefore,
+ * rather than switching to the shadow bb's gma
+ * address, we directly use the original batch buffer's
+ * gma address and send the original bb to the hardware
+ * directly.
+ */
+ if (!bb->ppgtt) {
bb->vma = i915_gem_object_ggtt_pin(bb->obj,
- NULL, 0, 0, 0);
+ NULL, 0, 0, 0);
if (IS_ERR(bb->vma)) {
ret = PTR_ERR(bb->vma);
goto err;
@@ -539,27 +531,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
if (gmadr_bytes == 8)
bb->bb_start_cmd_va[2] = 0;
- /* No one is going to touch shadow bb from now on. */
- if (bb->clflush & CLFLUSH_AFTER) {
- drm_clflush_virt_range(bb->va,
- bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_AFTER;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(bb->obj,
- false);
- if (ret)
- goto err;
-
ret = i915_vma_move_to_active(bb->vma,
workload->req,
0);
if (ret)
goto err;
-
- i915_gem_object_finish_access(bb->obj);
- bb->accessing = false;
}
+
+ /* No one is going to touch shadow bb from now on. */
+ i915_gem_object_flush_map(bb->obj);
}
return 0;
err:
@@ -630,9 +610,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
- if (bb->accessing)
- i915_gem_object_finish_access(bb->obj);
-
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
@@ -939,7 +916,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
+ if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
context_page_num = 19;
context_base = (void *) ctx->lrc_reg_state -
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 15d317f2a4a4..64e7a0b791c3 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -124,8 +124,6 @@ struct intel_vgpu_shadow_bb {
struct i915_vma *vma;
void *va;
u32 *bb_start_cmd_va;
- unsigned int clflush;
- bool accessing;
unsigned long bb_offset;
bool ppgtt;
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e7532e7d74e9..784219962193 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -34,11 +34,13 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
+#include "gt/intel_sseu_debugfs.h"
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
@@ -61,10 +63,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
intel_device_info_print_static(INTEL_INFO(i915), &p);
intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+ intel_gt_info_print(&i915->gt.info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
- i915_params_dump(&i915_modparams, &p);
+ i915_params_dump(&i915->params, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
@@ -492,6 +495,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
} else if (INTEL_GEN(dev_priv) >= 11) {
+ if (HAS_MASTER_UNIT_IRQ(dev_priv))
+ seq_printf(m, "Master Unit Interrupt Control: %08x\n",
+ I915_READ(DG1_MSTR_UNIT_INTR));
+
seq_printf(m, "Master Interrupt Control: %08x\n",
I915_READ(GEN11_GFX_MSTR_IRQ));
@@ -1138,13 +1145,20 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
struct intel_uncore *uncore = &dev_priv->uncore;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
+ if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ seq_puts(m, "L-shaped memory detected\n");
+
+ /* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
+ if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
intel_uncore_read(uncore, DCC));
@@ -1173,9 +1187,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
intel_uncore_read(uncore, DISP_ARB_CTL));
}
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
- seq_puts(m, "L-shaped memory detected\n");
-
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
@@ -1316,16 +1327,6 @@ static int i915_engine_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_rcs_topology(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
-
- return 0;
-}
-
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1572,264 +1573,16 @@ i915_cache_sharing_set(void *data, u64 val)
return 0;
}
-static void
-intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
- u8 *to_mask)
-{
- int offset = slice * sseu->ss_stride;
-
- memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
-}
-
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
-static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
-{
-#define SS_MAX 2
- const int ss_max = SS_MAX;
- u32 sig1[SS_MAX], sig2[SS_MAX];
- int ss;
-
- sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
- sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
- sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
- sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
-
- for (ss = 0; ss < ss_max; ss++) {
- unsigned int eu_cnt;
-
- if (sig1[ss] & CHV_SS_PG_ENABLE)
- /* skip disabled subslice */
- continue;
-
- sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] |= BIT(ss);
- eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
- ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
- ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
- ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
- sseu->eu_total += eu_cnt;
- sseu->eu_per_subslice = max_t(unsigned int,
- sseu->eu_per_subslice, eu_cnt);
- }
-#undef SS_MAX
-}
-
-static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
-{
-#define SS_MAX 6
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
- u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
- int s, ss;
-
- for (s = 0; s < info->sseu.max_slices; s++) {
- /*
- * FIXME: Valid SS Mask respects the spec and read
- * only valid bits for those registers, excluding reserved
- * although this seems wrong because it would leave many
- * subslices without ACK.
- */
- s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
- GEN10_PGCTL_VALID_SS_MASK(s);
- eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
- eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
- }
-
- eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
- GEN9_PGCTL_SSA_EU19_ACK |
- GEN9_PGCTL_SSA_EU210_ACK |
- GEN9_PGCTL_SSA_EU311_ACK;
- eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
- GEN9_PGCTL_SSB_EU19_ACK |
- GEN9_PGCTL_SSB_EU210_ACK |
- GEN9_PGCTL_SSB_EU311_ACK;
-
- for (s = 0; s < info->sseu.max_slices; s++) {
- if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
- /* skip disabled slice */
- continue;
-
- sseu->slice_mask |= BIT(s);
- intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
-
- for (ss = 0; ss < info->sseu.max_subslices; ss++) {
- unsigned int eu_cnt;
-
- if (info->sseu.has_subslice_pg &&
- !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
- /* skip disabled subslice */
- continue;
-
- eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
- eu_mask[ss % 2]);
- sseu->eu_total += eu_cnt;
- sseu->eu_per_subslice = max_t(unsigned int,
- sseu->eu_per_subslice,
- eu_cnt);
- }
- }
-#undef SS_MAX
-}
-
-static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
-{
-#define SS_MAX 3
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
- u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
- int s, ss;
-
- for (s = 0; s < info->sseu.max_slices; s++) {
- s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
- eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
- eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
- }
-
- eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
- GEN9_PGCTL_SSA_EU19_ACK |
- GEN9_PGCTL_SSA_EU210_ACK |
- GEN9_PGCTL_SSA_EU311_ACK;
- eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
- GEN9_PGCTL_SSB_EU19_ACK |
- GEN9_PGCTL_SSB_EU210_ACK |
- GEN9_PGCTL_SSB_EU311_ACK;
-
- for (s = 0; s < info->sseu.max_slices; s++) {
- if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
- /* skip disabled slice */
- continue;
-
- sseu->slice_mask |= BIT(s);
-
- if (IS_GEN9_BC(dev_priv))
- intel_sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
-
- for (ss = 0; ss < info->sseu.max_subslices; ss++) {
- unsigned int eu_cnt;
- u8 ss_idx = s * info->sseu.ss_stride +
- ss / BITS_PER_BYTE;
-
- if (IS_GEN9_LP(dev_priv)) {
- if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
- /* skip disabled subslice */
- continue;
-
- sseu->subslice_mask[ss_idx] |=
- BIT(ss % BITS_PER_BYTE);
- }
-
- eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
- eu_mask[ss%2]);
- sseu->eu_total += eu_cnt;
- sseu->eu_per_subslice = max_t(unsigned int,
- sseu->eu_per_subslice,
- eu_cnt);
- }
- }
-#undef SS_MAX
-}
-
-static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
-{
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
- u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
- int s;
-
- sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
-
- if (sseu->slice_mask) {
- sseu->eu_per_subslice = info->sseu.eu_per_subslice;
- for (s = 0; s < fls(sseu->slice_mask); s++)
- intel_sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
- sseu->eu_total = sseu->eu_per_subslice *
- intel_sseu_subslice_total(sseu);
-
- /* subtract fused off EU(s) from enabled slice(s) */
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- u8 subslice_7eu = info->sseu.subslice_7eu[s];
-
- sseu->eu_total -= hweight8(subslice_7eu);
- }
- }
-}
-
-static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
- const struct sseu_dev_info *sseu)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const char *type = is_available_info ? "Available" : "Enabled";
- int s;
-
- seq_printf(m, " %s Slice Mask: %04x\n", type,
- sseu->slice_mask);
- seq_printf(m, " %s Slice Total: %u\n", type,
- hweight8(sseu->slice_mask));
- seq_printf(m, " %s Subslice Total: %u\n", type,
- intel_sseu_subslice_total(sseu));
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- seq_printf(m, " %s Slice%i subslices: %u\n", type,
- s, intel_sseu_subslices_per_slice(sseu, s));
- }
- seq_printf(m, " %s EU Total: %u\n", type,
- sseu->eu_total);
- seq_printf(m, " %s EU Per Subslice: %u\n", type,
- sseu->eu_per_subslice);
-
- if (!is_available_info)
- return;
-
- seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
- if (HAS_POOLED_EU(dev_priv))
- seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
-
- seq_printf(m, " Has Slice Power Gating: %s\n",
- yesno(sseu->has_slice_pg));
- seq_printf(m, " Has Subslice Power Gating: %s\n",
- yesno(sseu->has_subslice_pg));
- seq_printf(m, " Has EU Power Gating: %s\n",
- yesno(sseu->has_eu_pg));
-}
-
static int i915_sseu_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
- struct sseu_dev_info sseu;
- intel_wakeref_t wakeref;
-
- if (INTEL_GEN(dev_priv) < 8)
- return -ENODEV;
-
- seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &info->sseu);
-
- seq_puts(m, "SSEU Device Status\n");
- memset(&sseu, 0, sizeof(sseu));
- intel_sseu_set_info(&sseu, info->sseu.max_slices,
- info->sseu.max_subslices,
- info->sseu.max_eus_per_subslice);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_sseu_device_status(dev_priv, &sseu);
- else if (IS_BROADWELL(dev_priv))
- bdw_sseu_device_status(dev_priv, &sseu);
- else if (IS_GEN(dev_priv, 9))
- gen9_sseu_device_status(dev_priv, &sseu);
- else if (INTEL_GEN(dev_priv) >= 10)
- gen10_sseu_device_status(dev_priv, &sseu);
- }
-
- i915_print_sseu_info(m, false, &sseu);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_gt *gt = &i915->gt;
- return 0;
+ return intel_sseu_status(m, gt);
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
@@ -1876,7 +1629,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_llc", i915_llc, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_engine_info", i915_engine_info, 0},
- {"i915_rcs_topology", i915_rcs_topology, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_sseu_status", i915_sseu_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 62b2c5f0495d..4e2b077692cb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -138,9 +138,6 @@ static ssize_t i915_param_charp_write(struct file *file,
char **s = m->private;
char *new, *old;
- /* FIXME: remove locking after params aren't the module params */
- kernel_param_lock(THIS_MODULE);
-
old = *s;
new = strndup_user(ubuf, PAGE_SIZE);
if (IS_ERR(new)) {
@@ -152,8 +149,6 @@ static ssize_t i915_param_charp_write(struct file *file,
kfree(old);
out:
- kernel_param_unlock(THIS_MODULE);
-
return len;
}
@@ -229,7 +224,7 @@ _i915_param_create_file(struct dentry *parent, const char *name,
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
- struct i915_params *params = &i915_modparams;
+ struct i915_params *params = &i915->params;
struct dentry *dir;
dir = debugfs_create_dir("i915_params", minor->debugfs_root);
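
Aside: with parameters now a per-device copy, the locking against the module parameter block is no longer needed here, and runtime changes go through the per-device debugfs directory while the module-parameter sysfs entries stay read-only. For illustration, a userspace sketch that flips one parameter at runtime; the path and the choice of enable_fbc are assumptions.

#include <stdio.h>

int main(void)
{
	/* Assumed layout: debugfs at /sys/kernel/debug, DRM minor 0. */
	const char *path = "/sys/kernel/debug/dri/0/i915_params/enable_fbc";
	char buf[32];
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);	/* request FBC on for this device only */
	fclose(f);

	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("enable_fbc is now: %s", buf);
	if (f)
		fclose(f);
	return 0;
}
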
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 34ee12f3f02d..5fd5af4bc855 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -501,6 +501,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
cpu_latency_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
+
+ i915_params_free(&dev_priv->params);
}
/**
@@ -529,13 +531,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
- intel_device_info_init_mmio(dev_priv);
-
- intel_uncore_prune_mmio_domains(&dev_priv->uncore);
-
- intel_uc_init_mmio(&dev_priv->gt.uc);
-
- ret = intel_engines_init_mmio(&dev_priv->gt);
+ ret = intel_gt_init_mmio(&dev_priv->gt);
if (ret)
goto err_uncore;
@@ -888,6 +884,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
+ intel_gt_info_print(&dev_priv->gt.info, &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -915,6 +912,9 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
+ /* Device parameters start as a copy of module parameters. */
+ i915_params_copy(&i915->params, &i915_modparams);
+
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
memcpy(device_info, match_info, sizeof(*device_info));
@@ -948,7 +948,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
- if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
+ if (!i915->params.nuclear_pageflip && match_info->gen < 5)
i915->drm.driver_features &= ~DRIVER_ATOMIC;
/*
@@ -958,7 +958,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
- i915_modparams.fake_lmem_start) {
+ i915->params.fake_lmem_start) {
mkwrite_device_info(i915)->memory_regions =
REGION_SMEM | REGION_LMEM | REGION_STOLEN;
mkwrite_device_info(i915)->is_dgfx = true;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ae99a9190200..e4f7f6518945 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -108,8 +108,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200515"
-#define DRIVER_TIMESTAMP 1589543364
+#define DRIVER_DATE "20200715"
+#define DRIVER_TIMESTAMP 1594811881
struct drm_i915_gem_object;
@@ -273,6 +273,7 @@ struct drm_i915_display_funcs {
void (*set_cdclk)(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe);
+ int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
@@ -421,6 +422,7 @@ struct intel_fbc {
unsigned int fence_y_offset;
u16 gen9_wa_cfb_stride;
+ u16 interval;
s8 fence_id;
} state_cache;
@@ -446,6 +448,7 @@ struct intel_fbc {
int cfb_size;
unsigned int fence_y_offset;
u16 gen9_wa_cfb_stride;
+ u16 interval;
s8 fence_id;
bool plane_visible;
} params;
@@ -690,6 +693,7 @@ struct intel_vbt_data {
bool initialized;
int bpp;
struct edp_power_seq pps;
+ bool hobl;
} edp;
struct {
@@ -830,6 +834,9 @@ struct drm_i915_private {
/* FIXME: Device release actions should all be moved to drmm_ */
bool do_release;
+ /* i915 device parameters */
+ struct i915_params params;
+
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@@ -951,6 +958,13 @@ struct drm_i915_private {
struct intel_global_obj obj;
} cdclk;
+ struct {
+ /* The current hardware dbuf configuration */
+ u8 enabled_slices;
+
+ struct intel_global_obj obj;
+ } dbuf;
+
/**
* wq - Driver workqueue for GEM.
*
@@ -981,7 +995,7 @@ struct drm_i915_private {
struct i915_gem_mm mm;
DECLARE_HASHTABLE(mm_structs, 7);
- struct mutex mm_lock;
+ spinlock_t mm_lock;
/* Kernel Modesetting */
@@ -1127,12 +1141,12 @@ struct drm_i915_private {
* Set during HW readout of watermarks/DDB. Some platforms
* need to know when we're still using BIOS-provided values
* (which we don't fully trust).
+ *
+ * FIXME get rid of this.
*/
bool distrust_bios_wm;
} wm;
- u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */
-
struct dram_info {
bool valid;
bool is_16gb_dimm;
@@ -1244,7 +1258,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
+ for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
(tmp__) ? \
((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
@@ -1257,6 +1271,11 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
(engine__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
+#define for_each_uabi_class_engine(engine__, class__, i915__) \
+ for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
+ (engine__) && (engine__)->uabi_class == (class__); \
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
+
#define I915_GTT_OFFSET_NONE ((u32)-1)
/*
@@ -1407,10 +1426,13 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
+#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
+#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
+#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1454,6 +1476,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
+
+#define IS_CML_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_CML_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \
+ INTEL_INFO(dev_priv)->gt == 2)
+
#define IS_CNL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ICL_WITH_PORT_F(dev_priv) \
@@ -1524,22 +1554,36 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TGL_REVID(p, since, until) \
(IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+#define RKL_REVID_A0 0x0
+#define RKL_REVID_B0 0x1
+#define RKL_REVID_C0 0x4
+
+#define IS_RKL_REVID(p, since, until) \
+ (IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
+
+#define DG1_REVID_A0 0x0
+#define DG1_REVID_B0 0x1
+
+#define IS_DG1_REVID(p, since, until) \
+ (IS_DG1(p) && IS_REVID(p, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
-#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
+#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
+#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
-#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
+#define ENGINE_INSTANCES_MASK(gt, first, count) ({ \
unsigned int first__ = (first); \
unsigned int count__ = (count); \
- (INTEL_INFO(dev_priv)->engine_mask & \
+ ((gt)->info.engine_mask & \
GENMASK(first__ + count__ - 1, first__)) >> first__; \
})
-#define VDBOX_MASK(dev_priv) \
- ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
-#define VEBOX_MASK(dev_priv) \
- ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
+#define VDBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
/*
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
@@ -1563,6 +1607,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
(INTEL_INFO(dev_priv)->has_logical_ring_preemption)
+#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
+
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
@@ -1617,6 +1663,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
+#define HAS_PSR_HW_TRACKING(dev_priv) \
+ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
@@ -1659,7 +1707,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
/* Only valid when HAS_DISPLAY() is true */
-#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display)
+#define INTEL_DISPLAY_ENABLED(dev_priv) \
+ (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
static inline bool intel_vtd_active(void)
{
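
Aside: HAS_ENGINE() and the instance-mask helpers now take an intel_gt and read gt->info.engine_mask instead of the static device info; the bit arithmetic itself is unchanged. A self-contained toy version of that mask math is shown below; the engine ids, the mask value and the macro taking a plain mask instead of an intel_gt are simplifications for the example.

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

#define __HAS_ENGINE(engine_mask, id)	((engine_mask) & BIT(id))

/* Simplified: operates on a plain mask rather than gt->info.engine_mask. */
#define ENGINE_INSTANCES_MASK(mask, first, count) \
	(((mask) & GENMASK((first) + (count) - 1, (first))) >> (first))

enum { RCS0, BCS0, VCS0, VCS1, VECS0 };	/* toy engine ids */

int main(void)
{
	unsigned int engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(VECS0);

	printf("has VCS1: %u\n", !!__HAS_ENGINE(engine_mask, VCS1));
	printf("vdbox instances: 0x%x\n",
	       ENGINE_INSTANCES_MASK(engine_mask, VCS0, 2));
	return 0;
}
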
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0cbcb9f54e7d..9aa3066cb75d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -933,6 +933,18 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
+static void discard_ggtt_vma(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ spin_lock(&obj->vma.lock);
+ if (!RB_EMPTY_NODE(&vma->obj_node)) {
+ rb_erase(&vma->obj_node, &obj->vma.tree);
+ RB_CLEAR_NODE(&vma->obj_node);
+ }
+ spin_unlock(&obj->vma.lock);
+}
+
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -979,6 +991,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
}
+new_vma:
vma = i915_vma_instance(obj, &ggtt->vm, view);
if (IS_ERR(vma))
return vma;
@@ -993,6 +1006,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
}
+ if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
+ discard_ggtt_vma(vma);
+ goto new_vma;
+ }
+
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
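
Aside: the i915_gem_object_ggtt_pin() change above prefers to discard a cached GGTT vma that is still pinned or active under a conflicting placement and retry with a fresh vma instance, rather than wait to unbind the old one. A toy standalone sketch of that discard-and-retry control flow follows; every name and the "busy" flag are stand-ins invented for the example.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct binding {
	bool busy;	/* stands in for i915_vma_is_pinned()/is_active() */
	int id;
};

static struct binding *cached;	/* stands in for the obj->vma.tree lookup */

static struct binding *lookup_or_create_binding(void)
{
	static int next_id = 1;

	if (!cached) {
		cached = calloc(1, sizeof(*cached));
		cached->id = next_id++;
	}
	return cached;
}

static void discard_binding(void)
{
	free(cached);	/* toy model: the real vma is torn down later */
	cached = NULL;
}

int main(void)
{
	struct binding *b;

	/* Pretend the first cached binding is still busy elsewhere. */
	lookup_or_create_binding()->busy = true;

new_binding:
	b = lookup_or_create_binding();
	if (b->busy) {
		discard_binding();	/* don't wait for it; start over */
		goto new_binding;
	}
	printf("pinned fresh binding %d\n", b->id);
	return 0;
}
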
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 1753c84d6c0d..f333e88a2b6e 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -72,7 +72,7 @@ struct drm_i915_private;
trace_printk(__VA_ARGS__); \
} while (0)
#define GEM_TRACE_DUMP() \
- do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0)
+ do { ftrace_dump(DUMP_ALL); __add_taint_for_CI(TAINT_WARN); } while (0)
#define GEM_TRACE_DUMP_ON(expr) \
do { if (expr) GEM_TRACE_DUMP(); } while (0)
#else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cb43381b0d37..c5ee1567f3d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
pages->sgl, pages->nents,
PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_KERNEL_MAPPING |
DMA_ATTR_NO_WARN))
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index f6226df9f972..c9b0ee5e1d23 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,7 +42,6 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
-#define PIN_UPDATE BIT_ULL(9)
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index d042644b9cd2..421613219ae9 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -12,7 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
drm_i915_getparam_t *param = data;
int value;
@@ -80,7 +80,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_HAS_GPU_RESET:
- value = i915_modparams.enable_hangcheck &&
+ value = i915->params.enable_hangcheck &&
intel_has_gpu_reset(&i915->gt);
if (value && intel_has_reset_engine(&i915->gt))
value = 2;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index eec292d06f11..6a3a2ce0b394 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -42,6 +42,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
@@ -425,7 +426,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct intel_engine_coredump *ee)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
+ const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
int slice;
int subslice;
@@ -619,16 +620,13 @@ static void print_error_vma(struct drm_i915_error_state_buf *m,
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
- const struct intel_device_info *info,
- const struct intel_runtime_info *runtime,
- const struct intel_driver_caps *caps)
+ struct i915_gpu_coredump *error)
{
struct drm_printer p = i915_error_printer(m);
- intel_device_info_print_static(info, &p);
- intel_device_info_print_runtime(runtime, &p);
- intel_device_info_print_topology(&runtime->sseu, &p);
- intel_driver_caps_print(caps, &p);
+ intel_device_info_print_static(&error->device_info, &p);
+ intel_device_info_print_runtime(&error->runtime_info, &p);
+ intel_driver_caps_print(&error->driver_caps, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -678,6 +676,15 @@ static void err_free_sgl(struct scatterlist *sgl)
}
}
+static void err_print_gt_info(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ struct drm_printer p = i915_error_printer(m);
+
+ intel_gt_info_print(&gt->info, &p);
+ intel_sseu_print_topology(&gt->info.sseu, &p);
+}
+
static void err_print_gt(struct drm_i915_error_state_buf *m,
struct intel_gt_coredump *gt)
{
@@ -734,6 +741,8 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
if (gt->uc)
err_print_uc(m, gt->uc);
+
+ err_print_gt_info(m, gt);
}
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
@@ -798,8 +807,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->display)
intel_display_print_error_state(m, error->display);
- err_print_capabilities(m, &error->device_info, &error->runtime_info,
- &error->driver_caps);
+ err_print_capabilities(m, error);
err_print_params(m, &error->params);
}
@@ -1630,6 +1638,11 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
+static void gt_record_info(struct intel_gt_coredump *gt)
+{
+ memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
+}
+
/*
* Generate a semi-unique error code. The code is not meant to have meaning. The
* code's only purpose is to try to prevent false duplicated bug reports by
@@ -1698,7 +1711,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->reset_count = i915_reset_count(&i915->gpu_error);
error->suspend_count = i915->suspend_count;
- i915_params_copy(&error->params, &i915_modparams);
+ i915_params_copy(&error->params, &i915->params);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
@@ -1713,7 +1726,7 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
struct i915_gpu_coredump *error;
- if (!i915_modparams.error_capture)
+ if (!i915->params.error_capture)
return NULL;
error = kzalloc(sizeof(*error), gfp);
@@ -1808,6 +1821,7 @@ struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
return ERR_PTR(-ENOMEM);
}
+ gt_record_info(error->gt);
gt_record_engines(error->gt, compress);
if (INTEL_INFO(i915)->has_gt_uc)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 76b80fbfb7e9..0220b0992808 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -15,6 +15,7 @@
#include <drm/drm_mm.h>
#include "gt/intel_engine.h"
+#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
#include "intel_device_info.h"
@@ -118,6 +119,8 @@ struct intel_gt_coredump {
bool awake;
bool simulated;
+ struct intel_gt_info info;
+
/* Generic register state */
u32 eir;
u32 pgtbl_er;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 284cf078135a..1fa67700d8f4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -777,7 +777,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
mode = &vblank->hwmode;
- if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
+ if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
return __intel_get_crtc_scanline_from_timestamp(crtc);
vtotal = mode->crtc_vtotal;
@@ -836,7 +836,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
unsigned long irqflags;
bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
- mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
+ crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
drm_dbg(&dev_priv->drm,
@@ -2097,67 +2097,68 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
*/
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
- struct drm_i915_private *dev_priv = arg;
+ struct drm_i915_private *i915 = arg;
+ void __iomem * const regs = i915->uncore.regs;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- if (!intel_irqs_enabled(dev_priv))
+ if (unlikely(!intel_irqs_enabled(i915)))
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
/* disable master interrupt before clearing iir */
- de_ier = I915_READ(DEIER);
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ de_ier = raw_reg_read(regs, DEIER);
+ raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
- if (!HAS_PCH_NOP(dev_priv)) {
- sde_ier = I915_READ(SDEIER);
- I915_WRITE(SDEIER, 0);
+ if (!HAS_PCH_NOP(i915)) {
+ sde_ier = raw_reg_read(regs, SDEIER);
+ raw_reg_write(regs, SDEIER, 0);
}
/* Find, clear, then process each source of interrupt */
- gt_iir = I915_READ(GTIIR);
+ gt_iir = raw_reg_read(regs, GTIIR);
if (gt_iir) {
- I915_WRITE(GTIIR, gt_iir);
- ret = IRQ_HANDLED;
- if (INTEL_GEN(dev_priv) >= 6)
- gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
+ raw_reg_write(regs, GTIIR, gt_iir);
+ if (INTEL_GEN(i915) >= 6)
+ gen6_gt_irq_handler(&i915->gt, gt_iir);
else
- gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
+ gen5_gt_irq_handler(&i915->gt, gt_iir);
+ ret = IRQ_HANDLED;
}
- de_iir = I915_READ(DEIIR);
+ de_iir = raw_reg_read(regs, DEIIR);
if (de_iir) {
- I915_WRITE(DEIIR, de_iir);
- ret = IRQ_HANDLED;
- if (INTEL_GEN(dev_priv) >= 7)
- ivb_display_irq_handler(dev_priv, de_iir);
+ raw_reg_write(regs, DEIIR, de_iir);
+ if (INTEL_GEN(i915) >= 7)
+ ivb_display_irq_handler(i915, de_iir);
else
- ilk_display_irq_handler(dev_priv, de_iir);
+ ilk_display_irq_handler(i915, de_iir);
+ ret = IRQ_HANDLED;
}
- if (INTEL_GEN(dev_priv) >= 6) {
- u32 pm_iir = I915_READ(GEN6_PMIIR);
+ if (INTEL_GEN(i915) >= 6) {
+ u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
if (pm_iir) {
- I915_WRITE(GEN6_PMIIR, pm_iir);
+ raw_reg_write(regs, GEN6_PMIIR, pm_iir);
+ gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
}
}
- I915_WRITE(DEIER, de_ier);
- if (!HAS_PCH_NOP(dev_priv))
- I915_WRITE(SDEIER, sde_ier);
+ raw_reg_write(regs, DEIER, de_ier);
+ if (sde_ier)
+ raw_reg_write(regs, SDEIER, sde_ier);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
return ret;
}
@@ -2254,7 +2255,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
+ if (IS_ROCKETLAKE(dev_priv))
+ return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
+ else if (INTEL_GEN(dev_priv) >= 11)
return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
else if (INTEL_GEN(dev_priv) >= 9)
return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2581,6 +2584,46 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gen11_master_intr_enable);
}
+static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
+{
+ u32 val;
+
+ /* First disable interrupts */
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
+
+ /* Get the indication levels and ack the master unit */
+ val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
+ if (unlikely(!val))
+ return 0;
+
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
+ * out as this bit doesn't exist anymore for DG1
+ */
+ val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
+ if (unlikely(!val))
+ return 0;
+
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
+
+ return val;
+}
+
+static inline void dg1_master_intr_enable(void __iomem * const regs)
+{
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
+}
+
+static irqreturn_t dg1_irq_handler(int irq, void *arg)
+{
+ return __gen11_irq_handler(arg,
+ dg1_master_intr_disable_and_ack,
+ dg1_master_intr_enable);
+}
+
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -2869,13 +2912,15 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
+ u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
if (INTEL_GEN(dev_priv) >= 12) {
enum transcoder trans;
- for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2902,8 +2947,8 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
- /* Wa_14010685332:icl */
- if (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) {
+ /* Wa_14010685332:icl,jsl,ehl,tgl,rkl */
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
@@ -2915,7 +2960,10 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- gen11_master_intr_disable(dev_priv->uncore.regs);
+ if (HAS_MASTER_UNIT_IRQ(dev_priv))
+ dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
+ else
+ gen11_master_intr_disable(dev_priv->uncore.regs);
gen11_gt_irq_reset(&dev_priv->gt);
gen11_display_irq_reset(dev_priv);
@@ -3066,7 +3114,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
hotplug_irqs = sde_ddi_mask | sde_tc_mask;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3396,6 +3445,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
+ u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
if (INTEL_GEN(dev_priv) <= 10)
@@ -3416,7 +3467,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12) {
enum transcoder trans;
- for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -3510,8 +3561,13 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(uncore->regs);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
+ dg1_master_intr_enable(uncore->regs);
+ POSTING_READ(DG1_MSTR_UNIT_INTR);
+ } else {
+ gen11_master_intr_enable(uncore->regs);
+ POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ }
}
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -4036,6 +4092,8 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
else
return i8xx_irq_handler;
} else {
+ if (HAS_MASTER_UNIT_IRQ(dev_priv))
+ return dg1_irq_handler;
if (INTEL_GEN(dev_priv) >= 11)
return gen11_irq_handler;
else if (INTEL_GEN(dev_priv) >= 8)
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 02559da61e6e..8d8db9ff0a48 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -40,6 +40,15 @@ struct i915_params i915_modparams __read_mostly = {
#undef MEMBER
};
+/*
+ * Note: As a rule, keep module parameter sysfs permissions read-only
+ * 0400. Runtime changes are only supported through i915 debugfs.
+ *
+ * For any exceptions requiring write access and runtime changes through module
+ * parameter sysfs, prevent debugfs file creation by setting the parameter's
+ * debugfs mode to 0.
+ */
+
i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
@@ -49,7 +58,7 @@ i915_param_named_unsafe(enable_dc, int, 0400,
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
"3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
-i915_param_named_unsafe(enable_fbc, int, 0600,
+i915_param_named_unsafe(enable_fbc, int, 0400,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
@@ -57,7 +66,7 @@ i915_param_named_unsafe(lvds_channel_mode, int, 0400,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-i915_param_named_unsafe(panel_use_ssc, int, 0600,
+i915_param_named_unsafe(panel_use_ssc, int, 0400,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
@@ -65,29 +74,34 @@ i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-i915_param_named_unsafe(reset, uint, 0600,
+i915_param_named_unsafe(reset, uint, 0400,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
i915_param_named_unsafe(vbt_firmware, charp, 0400,
"Load VBT from specified file under /lib/firmware");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-i915_param_named(error_capture, bool, 0600,
+i915_param_named(error_capture, bool, 0400,
"Record the GPU state following a hang. "
"This information in /sys/class/drm/card<N>/error is vital for "
"triaging and debugging hangs.");
#endif
-i915_param_named_unsafe(enable_hangcheck, bool, 0600,
+i915_param_named_unsafe(enable_hangcheck, bool, 0400,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-i915_param_named_unsafe(enable_psr, int, 0600,
+i915_param_named_unsafe(enable_psr, int, 0400,
"Enable PSR "
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
+i915_param_named(psr_safest_params, bool, 0400,
+ "Replace PSR VBT parameters by the safest and not optimal ones. This "
+ "is helpful to detect if PSR issues are related to bad values set in "
+ " VBT. (0=use VBT parameters, 1=use safest parameters)");
+
i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe the driver for specified devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
@@ -96,22 +110,22 @@ i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
-i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)");
+i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)");
-i915_param_named(fastboot, int, 0600,
+i915_param_named(fastboot, int, 0400,
"Try to skip unnecessary mode sets at boot time "
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
-i915_param_named_unsafe(load_detect_test, bool, 0600,
+i915_param_named_unsafe(load_detect_test, bool, 0400,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
-i915_param_named_unsafe(force_reset_modeset_test, bool, 0600,
+i915_param_named_unsafe(force_reset_modeset_test, bool, 0400,
"Force a modeset during gpu reset for testing (default:false). "
"For developers only.");
-i915_param_named_unsafe(invert_brightness, int, 0600,
+i915_param_named_unsafe(invert_brightness, int, 0400,
"Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
@@ -121,10 +135,11 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
-i915_param_named(mmio_debug, int, 0600,
+i915_param_named(mmio_debug, int, 0400,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
+/* Special case writable file */
i915_param_named(verbose_state_checks, bool, 0600,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
@@ -155,7 +170,7 @@ i915_param_named_unsafe(huc_firmware_path, charp, 0400,
i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
"DMC firmware path to use instead of the default one");
-i915_param_named_unsafe(enable_dp_mst, bool, 0600,
+i915_param_named_unsafe(enable_dp_mst, bool, 0400,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
@@ -163,7 +178,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
-i915_param_named(enable_dpcd_backlight, int, 0600,
+i915_param_named(enable_dpcd_backlight, int, 0400,
"Enable support for DPCD backlight control"
"(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 4f21bfffbf0e..53fb5ba8fbed 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -53,6 +53,7 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
+ param(bool, psr_safest_params, false, 0600) \
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
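The 0600 -> 0400 changes in i915_params.c above, mirrored in this param() table, tighten the mode of the files exposed under /sys/module/i915/parameters/: 0400 leaves a parameter readable by root but no longer writable through sysfs at runtime. A minimal sketch of the underlying mechanism, module_param_named() from <linux/moduleparam.h>, using an illustrative out-of-tree module and parameter name (not part of this patch):

/* demo_param.c - hypothetical module showing how the perm argument maps to
 * the sysfs file mode under /sys/module/demo_param/parameters/enable.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_enable = -1;
/* 0400: root may read the value, nobody may change it at runtime. */
module_param_named(enable, demo_enable, int, 0400);
MODULE_PARM_DESC(enable, "Enable the demo feature (-1=auto, 0=off, 1=on)");

static int __init demo_init(void) { return 0; }
static void __exit demo_exit(void) { }
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");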
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index eb0b5be7c35d..2338f92ce490 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -168,7 +168,7 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -188,7 +188,7 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -217,6 +217,7 @@ static const struct intel_device_info i85x_info = {
static const struct intel_device_info i865g_info = {
I845_FEATURES,
PLATFORM(INTEL_I865G),
+ .display.has_fbc = 1,
};
#define GEN3_FEATURES \
@@ -225,7 +226,7 @@ static const struct intel_device_info i865g_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 32, \
@@ -316,7 +317,7 @@ static const struct intel_device_info pnv_m_info = {
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 36, \
@@ -348,7 +349,7 @@ static const struct intel_device_info i965gm_info = {
static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .engine_mask = BIT(RCS0) | BIT(VCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -358,7 +359,7 @@ static const struct intel_device_info gm45_info = {
.is_mobile = 1,
.display.has_fbc = 1,
.display.supports_tv = 1,
- .engine_mask = BIT(RCS0) | BIT(VCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -367,7 +368,7 @@ static const struct intel_device_info gm45_info = {
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
- .engine_mask = BIT(RCS0) | BIT(VCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
@@ -397,7 +398,7 @@ static const struct intel_device_info ilk_m_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
@@ -448,7 +449,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
@@ -519,7 +520,7 @@ static const struct intel_device_info vlv_info = {
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display_mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
@@ -530,12 +531,13 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
+ .display.has_psr_hw_tracking = 1, \
.display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
HSW_PIPE_OFFSETS, \
@@ -596,7 +598,7 @@ static const struct intel_device_info bdw_rsvd_info = {
static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -607,7 +609,7 @@ static const struct intel_device_info chv_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
@@ -660,7 +662,7 @@ static const struct intel_device_info skl_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .engine_mask = \
+ .platform_engine_mask = \
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
@@ -679,7 +681,7 @@ static const struct intel_device_info skl_gt4_info = {
.is_lp = 1, \
.num_supported_dbuf_slices = 1, \
.display.has_hotplug = 1, \
- .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
@@ -690,6 +692,7 @@ static const struct intel_device_info skl_gt4_info = {
.display.has_fbc = 1, \
.display.has_hdcp = 1, \
.display.has_psr = 1, \
+ .display.has_psr_hw_tracking = 1, \
.has_runtime_pm = 1, \
.display.has_csr = 1, \
.has_rc6 = 1, \
@@ -741,7 +744,7 @@ static const struct intel_device_info kbl_gt2_info = {
static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -762,10 +765,24 @@ static const struct intel_device_info cfl_gt2_info = {
static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
+#define CML_PLATFORM \
+ GEN9_FEATURES, \
+ PLATFORM(INTEL_COMETLAKE)
+
+static const struct intel_device_info cml_gt1_info = {
+ CML_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info cml_gt2_info = {
+ CML_PLATFORM,
+ .gt = 2,
+};
+
#define GEN10_FEATURES \
GEN9_FEATURES, \
GEN(10), \
@@ -788,6 +805,7 @@ static const struct intel_device_info cnl_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
+ .abox_mask = BIT(0), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
@@ -816,7 +834,7 @@ static const struct intel_device_info cnl_info = {
static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
@@ -824,13 +842,14 @@ static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.require_force_probe = 1,
- .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
+ .abox_mask = GENMASK(2, 1), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
@@ -859,14 +878,39 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.display.has_modular_fia = 1,
- .engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
+static const struct intel_device_info rkl_info = {
+ GEN12_FEATURES,
+ PLATFORM(INTEL_ROCKETLAKE),
+ .abox_mask = BIT(0),
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C),
+ .require_force_probe = 1,
+ .display.has_psr_hw_tracking = 0,
+ .platform_engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
+};
+
#define GEN12_DGFX_FEATURES \
GEN12_FEATURES, \
+ .memory_regions = REGION_SMEM | REGION_LMEM, \
+ .has_master_unit_irq = 1, \
.is_dgfx = 1
+static const struct intel_device_info dg1_info __maybe_unused = {
+ GEN12_DGFX_FEATURES,
+ PLATFORM(INTEL_DG1),
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .require_force_probe = 1,
+ .platform_engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
+ BIT(VCS0) | BIT(VCS2),
+};
+
#undef GEN
#undef PLATFORM
@@ -933,14 +977,15 @@ static const struct pci_device_id pciidlist[] = {
INTEL_WHL_U_GT2_IDS(&cfl_gt2_info),
INTEL_AML_CFL_GT2_IDS(&cfl_gt2_info),
INTEL_WHL_U_GT3_IDS(&cfl_gt3_info),
- INTEL_CML_GT1_IDS(&cfl_gt1_info),
- INTEL_CML_GT2_IDS(&cfl_gt2_info),
- INTEL_CML_U_GT1_IDS(&cfl_gt1_info),
- INTEL_CML_U_GT2_IDS(&cfl_gt2_info),
+ INTEL_CML_GT1_IDS(&cml_gt1_info),
+ INTEL_CML_GT2_IDS(&cml_gt2_info),
+ INTEL_CML_U_GT1_IDS(&cml_gt1_info),
+ INTEL_CML_U_GT2_IDS(&cml_gt2_info),
INTEL_CNL_IDS(&cnl_info),
INTEL_ICL_11_IDS(&icl_info),
INTEL_EHL_IDS(&ehl_info),
INTEL_TGL_12_IDS(&tgl_info),
+ INTEL_RKL_IDS(&rkl_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 014f34c047d5..c6f6370283cf 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1773,7 +1773,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
i915_gem_object_flush_map(bo);
- i915_gem_object_unpin_map(bo);
+ __i915_gem_object_release_map(bo);
stream->noa_wait = vma;
return 0;
@@ -1868,7 +1868,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
*cs++ = 0;
i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_map(obj);
oa_bo->vma = i915_vma_instance(obj,
&stream->engine->gt->ggtt->vm,
@@ -2197,7 +2197,7 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
if (!intel_context_pin_if_active(ce))
continue;
- flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+ flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
err = gen8_modify_context(ce, flex, count);
intel_context_unpin(ce);
@@ -2341,7 +2341,7 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
if (engine->class != RENDER_CLASS)
continue;
- regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
+ regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
err = gen8_modify_self(ce, regs, num_regs, active);
if (err)
@@ -2741,8 +2741,7 @@ static void
get_default_sseu_config(struct intel_sseu *out_sseu,
struct intel_engine_cs *engine)
{
- const struct sseu_dev_info *devinfo_sseu =
- &RUNTIME_INFO(engine->i915)->sseu;
+ const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
@@ -2767,7 +2766,7 @@ get_sseu_config(struct intel_sseu *out_sseu,
drm_sseu->engine.engine_instance != engine->uabi_instance)
return -EINVAL;
- return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu);
+ return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 962ded9ce73f..28bc5f13ae52 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -441,7 +441,11 @@ static u64 count_interrupts(struct drm_i915_private *i915)
static void i915_pmu_event_destroy(struct perf_event *event)
{
- WARN_ON(event->parent);
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+
+ drm_WARN_ON(&i915->drm, event->parent);
+
module_put(THIS_MODULE);
}
@@ -561,7 +565,10 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
/* Do nothing */
} else if (sample == I915_SAMPLE_BUSY &&
intel_engine_supports_stats(engine)) {
- val = ktime_to_ns(intel_engine_get_busy_time(engine));
+ ktime_t unused;
+
+ val = ktime_to_ns(intel_engine_get_busy_time(engine,
+ &unused));
} else {
val = engine->pmu.sample[sample].cur;
}
@@ -1058,8 +1065,10 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+
+ drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID);
+ drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
cpuhp_remove_multi_state(pmu->cpuhp.slot);
pmu->cpuhp.slot = CPUHP_INVALID;
}
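The WARN_ON() -> drm_WARN_ON() conversions above need a struct drm_i915_private to attach the warning to, and the patch recovers it from the embedded pmu member via container_of(). A minimal userspace sketch of that pointer arithmetic, assuming simplified stand-in types (the real kernel macro also adds type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct i915_pmu { int base; };
struct drm_i915_private { int id; struct i915_pmu pmu; };

int main(void)
{
	struct drm_i915_private i915 = { .id = 42 };
	struct i915_pmu *pmu = &i915.pmu;

	/* Walk back from the embedded member to its containing structure. */
	struct drm_i915_private *owner =
		container_of(pmu, struct drm_i915_private, pmu);

	printf("%d\n", owner->id);	/* prints 42 */
	return 0;
}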
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index e75c528ebbe0..fed337ad7b68 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -31,7 +31,7 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
int ret;
@@ -109,8 +109,7 @@ query_engine_info(struct drm_i915_private *i915,
for_each_uabi_engine(engine, i915)
num_uabi_engines++;
- len = sizeof(struct drm_i915_query_engine_info) +
- num_uabi_engines * sizeof(struct drm_i915_engine_info);
+ len = struct_size(query_ptr, engines, num_uabi_engines);
ret = copy_query_item(&query, sizeof(query), len, query_item);
if (ret != 0)
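query_engine_info() above replaces the open-coded sizeof(header) + n * sizeof(element) with struct_size() from <linux/overflow.h>, which sizes a structure that ends in a flexible array and saturates instead of wrapping if the multiplication overflows. A rough sketch of the arithmetic it performs, with illustrative types and without the kernel helper's overflow handling:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct engine_info { uint64_t flags; };

struct engine_query {
	uint32_t num_engines;
	struct engine_info engines[];	/* flexible array member */
};

/* Approximately what struct_size(q, engines, n) evaluates to. */
static size_t engine_query_size(size_t n)
{
	return sizeof(struct engine_query) + n * sizeof(struct engine_info);
}

int main(void)
{
	printf("%zu\n", engine_query_size(4));	/* header + 4 trailing entries */
	return 0;
}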
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 06cd1d28a176..4e796ff4d7d0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -868,7 +868,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG1 _MMIO(0x2740)
#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+#define OAREPORTTRIG1_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG2 _MMIO(0x2744)
#define OAREPORTTRIG2_INVERT_A_0 (1 << 0)
@@ -921,7 +921,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG5 _MMIO(0x2750)
#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+#define OAREPORTTRIG5_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */
#define OAREPORTTRIG6 _MMIO(0x2754)
#define OAREPORTTRIG6_INVERT_A_0 (1 << 0)
@@ -1869,9 +1869,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
#define _EHL_COMBOPHY_C 0x160000
+#define _RKL_COMBOPHY_D 0x161000
#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
_ICL_COMBOPHY_B, \
- _EHL_COMBOPHY_C)
+ _EHL_COMBOPHY_C, \
+ _RKL_COMBOPHY_D)
/* CNL/ICL Port CL_DW registers */
#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
@@ -1972,6 +1974,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
+#define DCC_MODE_SELECT_MASK (0x3 << 20)
+#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
#define COMMON_KEEPER_EN (1 << 26)
#define LATENCY_OPTIM_MASK (0x3 << 2)
#define LATENCY_OPTIM_VAL(x) ((x) << 2)
@@ -2070,6 +2074,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
+#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
+#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
+#define ICL_PORT_TX_DW8_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(8, 0, phy))
+#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31)
+#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29)
+#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1)
+
#define _ICL_DPHY_CHKN_REG 0x194
#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
@@ -2825,6 +2836,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define SCPD_FBC_IGNORE_3D (1 << 6)
#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
@@ -2877,9 +2889,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
-#define MBUS_ABOX_CTL _MMIO(0x45038)
-#define MBUS_ABOX1_CTL _MMIO(0x45048)
-#define MBUS_ABOX2_CTL _MMIO(0x4504C)
+#define _MBUS_ABOX0_CTL 0x45038
+#define _MBUS_ABOX1_CTL 0x45048
+#define _MBUS_ABOX2_CTL 0x4504C
+#define MBUS_ABOX_CTL(x) _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \
+ _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL))
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -3203,13 +3218,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN (1 << 31)
-#define FBC_CTL_PERIODIC (1 << 30)
-#define FBC_CTL_INTERVAL_SHIFT (16)
-#define FBC_CTL_UNCOMPRESSIBLE (1 << 14)
-#define FBC_CTL_C3_IDLE (1 << 13)
-#define FBC_CTL_STRIDE_SHIFT (5)
-#define FBC_CTL_FENCENO_SHIFT (0)
+#define FBC_CTL_EN REG_BIT(31)
+#define FBC_CTL_PERIODIC REG_BIT(30)
+#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
+#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
+#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
+#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
+#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm */
+#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
+#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
+#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
#define FBC_COMMAND _MMIO(0x320c)
#define FBC_CMD_COMPRESS (1 << 0)
#define FBC_STATUS _MMIO(0x3210)
@@ -3768,19 +3787,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Clocking configuration register */
#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
-#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_400_ALT (5 << 0) /* hrawclk 100 */
#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
-/*
- * Note that on at least on ELK the below value is reported for both
- * 333 and 400 MHz BIOS FSB setting, but given that the gmch datasheet
- * lists only 200/266/333 MHz FSB as supported let's decode it as 333 MHz.
- */
#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
+#define CLKCFG_FSB_1600_ALT (6 << 0) /* hrawclk 400 */
#define CLKCFG_FSB_MASK (7 << 0)
#define CLKCFG_MEM_533 (1 << 4)
#define CLKCFG_MEM_667 (2 << 4)
@@ -4512,25 +4528,39 @@ enum {
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
-#define _PSR2_CTL_A 0x60900
-#define _PSR2_CTL_EDP 0x6f900
-#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
-#define EDP_PSR2_ENABLE (1 << 31)
-#define EDP_SU_TRACK_ENABLE (1 << 30)
-#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
-#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
-#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
-#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
-#define EDP_PSR2_TP2_TIME_500us (0 << 8)
-#define EDP_PSR2_TP2_TIME_100us (1 << 8)
-#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
-#define EDP_PSR2_TP2_TIME_50us (3 << 8)
-#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
-#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
-#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
-#define EDP_PSR2_IDLE_FRAME_MASK 0xf
-#define EDP_PSR2_IDLE_FRAME_SHIFT 0
+#define _PSR2_CTL_A 0x60900
+#define _PSR2_CTL_EDP 0x6f900
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
+#define EDP_PSR2_ENABLE (1 << 31)
+#define EDP_SU_TRACK_ENABLE (1 << 30)
+#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28)
+#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28)
+#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */
+#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8
+#define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)
+#define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13)
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << 13)
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13)
+#define EDP_PSR2_FAST_WAKE_MAX_LINES 8
+#define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11)
+#define EDP_PSR2_FAST_WAKE_MASK (3 << 11)
+#define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5
+#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << 10)
+#define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10)
+#define EDP_PSR2_TP2_TIME_500us (0 << 8)
+#define EDP_PSR2_TP2_TIME_100us (1 << 8)
+#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
+#define EDP_PSR2_TP2_TIME_50us (3 << 8)
+#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
+#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
+#define EDP_PSR2_IDLE_FRAME_MASK 0xf
+#define EDP_PSR2_IDLE_FRAME_SHIFT 0
#define _PSR_EVENT_TRANS_A 0x60848
#define _PSR_EVENT_TRANS_B 0x61848
@@ -4569,6 +4599,18 @@ enum {
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
#define PSR2_SU_STATUS_FRAMES 8
+#define _PSR2_MAN_TRK_CTL_A 0x60910
+#define _PSR2_MAN_TRK_CTL_EDP 0x6f910
+#define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
+#define PSR2_MAN_TRK_CTL_ENABLE REG_BIT(31)
+#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(30, 21)
+#define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
+#define PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(20, 11)
+#define PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+#define PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(3)
+#define PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(2)
+#define PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(1)
+
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
@@ -6915,6 +6957,8 @@ enum {
#define _PLANE_CUS_CTL_1_A 0x701c8
#define _PLANE_CUS_CTL_2_A 0x702c8
#define PLANE_CUS_ENABLE (1 << 31)
+#define PLANE_CUS_PLANE_4_RKL (0 << 30)
+#define PLANE_CUS_PLANE_5_RKL (1 << 30)
#define PLANE_CUS_PLANE_6 (0 << 30)
#define PLANE_CUS_PLANE_7 (1 << 30)
#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
@@ -6933,7 +6977,7 @@ enum {
#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
-#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
+#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 (1 << 17)
#define PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709 (2 << 17)
#define PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020 (3 << 17)
#define PLANE_COLOR_CSC_MODE_RGB709_TO_RGB2020 (4 << 17)
@@ -7130,7 +7174,52 @@ enum {
#define PLANE_COLOR_CTL(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_COLOR_CTL_1(pipe), _PLANE_COLOR_CTL_2(pipe))
-#/* SKL new cursor registers */
+#define _SEL_FETCH_PLANE_BASE_1_A 0x70890
+#define _SEL_FETCH_PLANE_BASE_2_A 0x708B0
+#define _SEL_FETCH_PLANE_BASE_3_A 0x708D0
+#define _SEL_FETCH_PLANE_BASE_4_A 0x708F0
+#define _SEL_FETCH_PLANE_BASE_5_A 0x70920
+#define _SEL_FETCH_PLANE_BASE_6_A 0x70940
+#define _SEL_FETCH_PLANE_BASE_7_A 0x70960
+#define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880
+#define _SEL_FETCH_PLANE_BASE_1_B 0x70990
+
+#define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
+ _SEL_FETCH_PLANE_BASE_1_A, \
+ _SEL_FETCH_PLANE_BASE_2_A, \
+ _SEL_FETCH_PLANE_BASE_3_A, \
+ _SEL_FETCH_PLANE_BASE_4_A, \
+ _SEL_FETCH_PLANE_BASE_5_A, \
+ _SEL_FETCH_PLANE_BASE_6_A, \
+ _SEL_FETCH_PLANE_BASE_7_A, \
+ _SEL_FETCH_PLANE_BASE_CUR_A)
+#define _SEL_FETCH_PLANE_BASE_1(pipe) _PIPE(pipe, _SEL_FETCH_PLANE_BASE_1_A, _SEL_FETCH_PLANE_BASE_1_B)
+#define _SEL_FETCH_PLANE_BASE(pipe, plane) (_SEL_FETCH_PLANE_BASE_1(pipe) - \
+ _SEL_FETCH_PLANE_BASE_1_A + \
+ _SEL_FETCH_PLANE_BASE_A(plane))
+
+#define _SEL_FETCH_PLANE_CTL_1_A 0x70890
+#define PLANE_SEL_FETCH_CTL(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \
+ _SEL_FETCH_PLANE_CTL_1_A - \
+ _SEL_FETCH_PLANE_BASE_1_A)
+#define PLANE_SEL_FETCH_CTL_ENABLE REG_BIT(31)
+
+#define _SEL_FETCH_PLANE_POS_1_A 0x70894
+#define PLANE_SEL_FETCH_POS(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \
+ _SEL_FETCH_PLANE_POS_1_A - \
+ _SEL_FETCH_PLANE_BASE_1_A)
+
+#define _SEL_FETCH_PLANE_SIZE_1_A 0x70898
+#define PLANE_SEL_FETCH_SIZE(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \
+ _SEL_FETCH_PLANE_SIZE_1_A - \
+ _SEL_FETCH_PLANE_BASE_1_A)
+
+#define _SEL_FETCH_PLANE_OFFSET_1_A 0x7089C
+#define PLANE_SEL_FETCH_OFFSET(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \
+ _SEL_FETCH_PLANE_OFFSET_1_A - \
+ _SEL_FETCH_PLANE_BASE_1_A)
+
+/* SKL new cursor registers */
#define _CUR_BUF_CFG_A 0x7017c
#define _CUR_BUF_CFG_B 0x7117c
#define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
@@ -7581,6 +7670,9 @@ enum {
GEN11_PIPE_PLANE7_FAULT | \
GEN11_PIPE_PLANE6_FAULT | \
GEN11_PIPE_PLANE5_FAULT)
+#define RKL_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
+ GEN11_PIPE_PLANE5_FAULT)
#define GEN8_DE_PORT_ISR _MMIO(0x44440)
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
@@ -7641,6 +7733,10 @@ enum {
#define GEN11_GT_DW1_IRQ (1 << 1)
#define GEN11_GT_DW0_IRQ (1 << 0)
+#define DG1_MSTR_UNIT_INTR _MMIO(0x190008)
+#define DG1_MSTR_IRQ REG_BIT(31)
+#define DG1_MSTR_UNIT(u) REG_BIT(u)
+
#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
@@ -7773,11 +7869,12 @@ enum {
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
-#define CHICKEN_PAR1_1 _MMIO(0x42080)
+#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
-#define DPA_MASK_VBLANK_SRD (1 << 15)
-#define FORCE_ARB_IDLE_PLANES (1 << 14)
-#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
+#define DPA_MASK_VBLANK_SRD (1 << 15)
+#define FORCE_ARB_IDLE_PLANES (1 << 14)
+#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
+#define IGNORE_PSR2_HW_TRACKING (1 << 1)
#define CHICKEN_PAR2_1 _MMIO(0x42090)
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
@@ -7835,13 +7932,20 @@ enum {
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
-#define BW_BUDDY1_CTL _MMIO(0x45140)
-#define BW_BUDDY2_CTL _MMIO(0x45150)
+#define _BW_BUDDY0_CTL 0x45130
+#define _BW_BUDDY1_CTL 0x45140
+#define BW_BUDDY_CTL(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_CTL, \
+ _BW_BUDDY1_CTL))
#define BW_BUDDY_DISABLE REG_BIT(31)
#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
+#define BW_BUDDY_TLB_REQ_TIMER(x) REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, x)
-#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
-#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)
+#define _BW_BUDDY0_PAGE_MASK 0x45134
+#define _BW_BUDDY1_PAGE_MASK 0x45144
+#define BW_BUDDY_PAGE_MASK(x) _MMIO(_PICK_EVEN(x, \
+ _BW_BUDDY0_PAGE_MASK, \
+ _BW_BUDDY1_PAGE_MASK))
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
@@ -7852,6 +7956,12 @@ enum {
#define MASK_WAKEMEM (1 << 13)
#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
+#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
+#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
+#define DCPR_MASK_LPMODE REG_BIT(26)
+#define DCPR_SEND_RESP_IMM REG_BIT(25)
+#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
+
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
@@ -8002,6 +8112,8 @@ enum {
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
#define FF_MODE2 _MMIO(0x6604)
+#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
+#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
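Several definitions above (FBC_CTL_*, BW_BUDDY_*, PSR2_MAN_TRK_CTL_*) move from hand-written shifts to REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(): the _MASK macro names the bit span and the _PREP form shifts a value into that span. A small worked sketch of the same encoding, assuming the generic GENMASK()/FIELD_PREP() semantics and using userspace stand-ins rather than the i915 wrappers:

#include <stdio.h>

/* Userspace stand-ins for the GENMASK()/FIELD_PREP() style helpers. */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define FBC_CTL_STRIDE_MASK	GENMASK(12, 5)
#define FBC_CTL_STRIDE(x)	FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))

int main(void)
{
	/* A stride field value of 3 lands in bits 12:5, i.e. 3 << 5 = 0x60. */
	printf("mask=0x%x value=0x%x\n",
	       FBC_CTL_STRIDE_MASK, FBC_CTL_STRIDE(3));
	return 0;
}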
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index def62100e666..0b2fe55e6194 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -42,7 +42,6 @@
#include "intel_pm.h"
struct execute_cb {
- struct list_head link;
struct irq_work work;
struct i915_sw_fence *fence;
void (*hook)(struct i915_request *rq, struct dma_fence *signal);
@@ -57,7 +56,7 @@ static struct i915_global_request {
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return dev_name(to_request(fence)->i915->drm.dev);
+ return dev_name(to_request(fence)->engine->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
@@ -189,14 +188,15 @@ static void irq_execute_cb_hook(struct irq_work *wrk)
static void __notify_execute_cb(struct i915_request *rq)
{
- struct execute_cb *cb;
+ struct execute_cb *cb, *cn;
lockdep_assert_held(&rq->lock);
- if (list_empty(&rq->execute_cb))
+ GEM_BUG_ON(!i915_request_is_active(rq));
+ if (llist_empty(&rq->execute_cb))
return;
- list_for_each_entry(cb, &rq->execute_cb, link)
+ llist_for_each_entry_safe(cb, cn, rq->execute_cb.first, work.llnode)
irq_work_queue(&cb->work);
/*
@@ -209,7 +209,7 @@ static void __notify_execute_cb(struct i915_request *rq)
* preempt-to-idle cycle on the target engine, all the while the
* master execute_cb may refire.
*/
- INIT_LIST_HEAD(&rq->execute_cb);
+ init_llist_head(&rq->execute_cb);
}
static inline void
@@ -327,7 +327,7 @@ bool i915_request_retire(struct i915_request *rq)
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
__notify_execute_cb(rq);
}
- GEM_BUG_ON(!list_empty(&rq->execute_cb));
+ GEM_BUG_ON(!llist_empty(&rq->execute_cb));
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
@@ -357,6 +357,12 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (i915_request_retire(tmp) && tmp != rq);
}
+static void __llist_add(struct llist_node *node, struct llist_head *head)
+{
+ node->next = head->first;
+ head->first = node;
+}
+
static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{
@@ -442,7 +448,7 @@ __await_execution(struct i915_request *rq,
i915_sw_fence_complete(cb->fence);
kmem_cache_free(global.slab_execute_cbs, cb);
} else {
- list_add_tail(&cb->link, &signal->execute_cb);
+ __llist_add(&cb->work.llnode, &signal->execute_cb);
}
spin_unlock_irq(&signal->lock);
@@ -554,22 +560,25 @@ bool __i915_request_submit(struct i915_request *request)
engine->serial++;
result = true;
-xfer: /* We may be recursing from the signal callback of another i915 fence */
- spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
+xfer:
if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests);
clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
}
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
- !i915_request_enable_breadcrumb(request))
- intel_engine_signal_breadcrumbs(engine);
+ /* We may be recursing from the signal callback of another i915 fence */
+ if (!i915_request_signaled(request)) {
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- __notify_execute_cb(request);
+ __notify_execute_cb(request);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &request->fence.flags) &&
+ !i915_request_enable_breadcrumb(request))
+ intel_engine_signal_breadcrumbs(engine);
- spin_unlock(&request->lock);
+ spin_unlock(&request->lock);
+ GEM_BUG_ON(!llist_empty(&request->execute_cb));
+ }
return result;
}
@@ -751,7 +760,7 @@ static void __i915_request_ctor(void *arg)
rq->file_priv = NULL;
rq->capture_list = NULL;
- INIT_LIST_HEAD(&rq->execute_cb);
+ init_llist_head(&rq->execute_cb);
}
struct i915_request *
@@ -806,7 +815,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
}
}
- rq->i915 = ce->engine->i915;
rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
@@ -841,7 +849,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->batch = NULL;
GEM_BUG_ON(rq->file_priv);
GEM_BUG_ON(rq->capture_list);
- GEM_BUG_ON(!list_empty(&rq->execute_cb));
+ GEM_BUG_ON(!llist_empty(&rq->execute_cb));
/*
* Reserve space in the ring buffer for all the commands required to
@@ -1005,12 +1013,12 @@ __emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
u32 seqno)
{
- const int has_token = INTEL_GEN(to->i915) >= 12;
+ const int has_token = INTEL_GEN(to->engine->i915) >= 12;
u32 hwsp_offset;
int len, err;
u32 *cs;
- GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+ GEM_BUG_ON(INTEL_GEN(to->engine->i915) < 8);
GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
/* We need to pin the signaler's HWSP until we are finished reading. */
@@ -1205,7 +1213,7 @@ __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{
mark_external(rq);
return i915_sw_fence_await_dma_fence(&rq->submit, fence,
- i915_fence_context_timeout(rq->i915,
+ i915_fence_context_timeout(rq->engine->i915,
fence->context),
I915_FENCE_GFP);
}
@@ -1776,7 +1784,8 @@ long i915_request_wait(struct i915_request *rq,
* (bad for battery).
*/
if (flags & I915_WAIT_PRIORITY) {
- if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
+ if (!i915_request_started(rq) &&
+ INTEL_GEN(rq->engine->i915) >= 6)
intel_rps_boost(rq);
}
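The execute_cb changes above replace a doubly linked list_head with a single-pointer llist_head: callbacks are pushed with a non-atomic __llist_add() (legal here because rq->lock is still held) and consumed in one pass with llist_for_each_entry_safe(). A minimal plain-C sketch of that intrusive singly linked push/walk pattern, with illustrative names and without the lock-free atomics the kernel llist API provides:

#include <stdio.h>

struct llist_node { struct llist_node *next; };
struct llist_head { struct llist_node *first; };

struct execute_cb {
	struct llist_node node;	/* first member: node address == cb address */
	int id;
};

/* Non-atomic push; only safe while the owner holds the protecting lock. */
static void __llist_add(struct llist_node *node, struct llist_head *head)
{
	node->next = head->first;
	head->first = node;
}

int main(void)
{
	struct llist_head head = { NULL };
	struct execute_cb a = { .id = 1 }, b = { .id = 2 };

	__llist_add(&a.node, &head);
	__llist_add(&b.node, &head);

	/* One pass over the chain; pushes are LIFO, so this prints 2 then 1. */
	for (struct llist_node *n = head.first; n; n = n->next)
		printf("%d\n", ((struct execute_cb *)n)->id);

	return 0;
}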
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 8ec7ee4dbadc..590762820761 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -162,9 +162,6 @@ struct i915_request {
struct dma_fence fence;
spinlock_t lock;
- /** On Which ring this request was generated */
- struct drm_i915_private *i915;
-
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
@@ -214,7 +211,7 @@ struct i915_request {
ktime_t emitted;
} duration;
};
- struct list_head execute_cb;
+ struct llist_head execute_cb;
struct i915_sw_fence semaphore;
/*
@@ -564,7 +561,7 @@ static inline void i915_request_clear_hold(struct i915_request *rq)
}
static inline struct intel_timeline *
-i915_request_timeline(struct i915_request *rq)
+i915_request_timeline(const struct i915_request *rq)
{
/* Valid only while the request is being constructed (or retired). */
return rcu_dereference_protected(rq->timeline,
@@ -572,14 +569,14 @@ i915_request_timeline(struct i915_request *rq)
}
static inline struct i915_gem_context *
-i915_request_gem_context(struct i915_request *rq)
+i915_request_gem_context(const struct i915_request *rq)
{
/* Valid only while the request is being constructed (or retired). */
return rcu_dereference_protected(rq->context->gem_context, true);
}
static inline struct intel_timeline *
-i915_request_active_timeline(struct i915_request *rq)
+i915_request_active_timeline(const struct i915_request *rq)
{
/*
* When in use during submission, we are protected by a guarantee that
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index bc854ad60954..a4addcc64978 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -735,7 +735,7 @@ TRACE_EVENT(i915_request_queue,
),
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -761,7 +761,7 @@ DECLARE_EVENT_CLASS(i915_request,
),
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -804,7 +804,7 @@ TRACE_EVENT(i915_request_in,
),
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -833,7 +833,7 @@ TRACE_EVENT(i915_request_out,
),
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -895,7 +895,7 @@ TRACE_EVENT(i915_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- __entry->dev = rq->i915->drm.primary->index;
+ __entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index e28eae4a8f70..4c305d838016 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -49,6 +49,16 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
}
}
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint)
+{
+ __i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n",
+ taint, (void *)_RET_IP_);
+
+ /* Failures that occur during fault injection testing are expected */
+ if (!i915_error_injected())
+ __add_taint_for_CI(taint);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_probe_fail_count;
@@ -91,7 +101,7 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
return;
}
- timeout = msecs_to_jiffies_timeout(timeout);
+ timeout = msecs_to_jiffies(timeout);
/*
* Paranoia to make sure the compiler computes the timeout before
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 03a73d2bd50d..54773371e6bd 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -266,19 +266,6 @@ static inline int list_is_last_rcu(const struct list_head *list,
return READ_ONCE(list->next) == head;
}
-/*
- * Wait until the work is finally complete, even if it tries to postpone
- * by requeueing itself. Note, that if the worker never cancels itself,
- * we will spin forever.
- */
-static inline void drain_delayed_work(struct delayed_work *dw)
-{
- do {
- while (flush_delayed_work(dw))
- ;
- } while (delayed_work_pending(dw));
-}
-
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
@@ -436,7 +423,8 @@ static inline const char *enableddisabled(bool v)
return v ? "enabled" : "disabled";
}
-static inline void add_taint_for_CI(unsigned int taint)
+void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
+static inline void __add_taint_for_CI(unsigned int taint)
{
/*
* The system is "ok", just about surviving for the user, but
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 1f9cd33b35cb..bc64f773dcdb 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -304,7 +304,7 @@ static int __vma_bind(struct dma_fence_work *work)
struct i915_vma *vma = vw->vma;
int err;
- err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+ err = vma->ops->bind_vma(vma->vm, vma, vw->cache_level, vw->flags);
if (err)
atomic_or(I915_VMA_ERROR, &vma->flags);
@@ -394,22 +394,20 @@ int i915_vma_bind(struct i915_vma *vma,
vma_flags = atomic_read(&vma->flags);
vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- if (flags & PIN_UPDATE)
- bind_flags |= vma_flags;
- else
- bind_flags &= ~vma_flags;
+
+ bind_flags &= ~vma_flags;
if (bind_flags == 0)
return 0;
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ if (work && bind_flags & vma->vm->bind_async_flags) {
struct dma_fence *prev;
work->vma = vma;
work->cache_level = cache_level;
- work->flags = bind_flags | I915_VMA_ALLOC;
+ work->flags = bind_flags;
/*
* Note we only want to chain up to the migration fence on
@@ -435,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma,
work->pinned = vma->obj;
}
} else {
- ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+ ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags);
if (ret)
return ret;
}
@@ -865,7 +863,6 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
- GEM_BUG_ON(flags & PIN_UPDATE);
GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
/* First try and grab the pin without rebinding the vma */
@@ -1087,7 +1084,8 @@ void i915_vma_release(struct kref *ref)
spin_lock(&obj->vma.lock);
list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &obj->vma.tree);
+ if (!RB_EMPTY_NODE(&vma->obj_node))
+ rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
}
@@ -1229,31 +1227,9 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-int __i915_vma_unbind(struct i915_vma *vma)
+void __i915_vma_evict(struct i915_vma *vma)
{
- int ret;
-
- lockdep_assert_held(&vma->vm->mutex);
-
- if (i915_vma_is_pinned(vma)) {
- vma_print_allocator(vma, "is pinned");
- return -EAGAIN;
- }
-
- /*
- * After confirming that no one else is pinning this vma, wait for
- * any laggards who may have crept in during the wait (through
- * a residual pin skipping the vm->mutex) to complete.
- */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
- if (!drm_mm_node_allocated(&vma->node))
- return 0;
-
GEM_BUG_ON(i915_vma_is_pinned(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_map_and_fenceable(vma)) {
/* Force a pagefault for domain tracking on next user access */
@@ -1285,13 +1261,40 @@ int __i915_vma_unbind(struct i915_vma *vma)
if (likely(atomic_read(&vma->vm->open))) {
trace_i915_vma_unbind(vma);
- vma->ops->unbind_vma(vma);
+ vma->ops->unbind_vma(vma->vm, vma);
}
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags);
i915_vma_detach(vma);
vma_unbind_pages(vma);
+}
+
+int __i915_vma_unbind(struct i915_vma *vma)
+{
+ int ret;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
+ */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ __i915_vma_evict(vma);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
@@ -1303,13 +1306,13 @@ int i915_vma_unbind(struct i915_vma *vma)
intel_wakeref_t wakeref = 0;
int err;
- if (!drm_mm_node_allocated(&vma->node))
- return 0;
-
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma);
if (err)
- goto out_rpm;
+ return err;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8ad1daabcd58..d0d01f909548 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -203,6 +203,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void __i915_vma_evict(struct i915_vma *vma);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 63831cdb7402..9e9082dc8f4b 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -235,7 +235,6 @@ struct i915_vma {
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
#define I915_VMA_ALLOC_BIT 12
-#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
#define I915_VMA_ERROR_BIT 13
#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 8a635bd4d5d8..40c590db3c76 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -26,6 +26,7 @@
#include <drm/i915_pciids.h>
#include "display/intel_cdclk.h"
+#include "display/intel_de.h"
#include "intel_device_info.h"
#include "i915_drv.h"
@@ -57,10 +58,13 @@ static const char * const platform_names[] = {
PLATFORM_NAME(KABYLAKE),
PLATFORM_NAME(GEMINILAKE),
PLATFORM_NAME(COFFEELAKE),
+ PLATFORM_NAME(COMETLAKE),
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
PLATFORM_NAME(ELKHARTLAKE),
PLATFORM_NAME(TIGERLAKE),
+ PLATFORM_NAME(ROCKETLAKE),
+ PLATFORM_NAME(DG1),
};
#undef PLATFORM_NAME
@@ -89,7 +93,6 @@ static const char *iommu_name(void)
void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p)
{
- drm_printf(p, "engines: %x\n", info->engine_mask);
drm_printf(p, "gen: %d\n", info->gen);
drm_printf(p, "gt: %d\n", info->gt);
drm_printf(p, "iommu: %s\n", iommu_name());
@@ -109,571 +112,18 @@ void intel_device_info_print_static(const struct intel_device_info *info,
#undef PRINT_FLAG
}
-static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
-{
- int s;
-
- drm_printf(p, "slice total: %u, mask=%04x\n",
- hweight8(sseu->slice_mask), sseu->slice_mask);
- drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
- for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
- s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
- }
- drm_printf(p, "EU total: %u\n", sseu->eu_total);
- drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
- drm_printf(p, "has slice power gating: %s\n",
- yesno(sseu->has_slice_pg));
- drm_printf(p, "has subslice power gating: %s\n",
- yesno(sseu->has_subslice_pg));
- drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
-}
-
void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
- sseu_dump(&info->sseu, p);
-
drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
drm_printf(p, "CS timestamp frequency: %u Hz\n",
info->cs_timestamp_frequency_hz);
}
-static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
- int subslice)
-{
- int slice_stride = sseu->max_subslices * sseu->eu_stride;
-
- return slice * slice_stride + subslice * sseu->eu_stride;
-}
-
-static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
- int subslice)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
- u16 eu_mask = 0;
-
- for (i = 0; i < sseu->eu_stride; i++) {
- eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
- (i * BITS_PER_BYTE);
- }
-
- return eu_mask;
-}
-
-static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
- u16 eu_mask)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
-
- for (i = 0; i < sseu->eu_stride; i++) {
- sseu->eu_mask[offset + i] =
- (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
- }
-}
-
-void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p)
-{
- int s, ss;
-
- if (sseu->max_slices == 0) {
- drm_printf(p, "Unavailable\n");
- return;
- }
-
- for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
- s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
-
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- u16 enabled_eus = sseu_get_eus(sseu, s, ss);
-
- drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
- ss, hweight16(enabled_eus), enabled_eus);
- }
- }
-}
-
-static u16 compute_eu_total(const struct sseu_dev_info *sseu)
-{
- u16 i, total = 0;
-
- for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
- total += hweight8(sseu->eu_mask[i]);
-
- return total;
-}
-
-static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
- u8 s_en, u32 ss_en, u16 eu_en)
-{
- int s, ss;
-
- /* ss_en represents entire subslice mask across all slices */
- GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
- sizeof(ss_en) * BITS_PER_BYTE);
-
- for (s = 0; s < sseu->max_slices; s++) {
- if ((s_en & BIT(s)) == 0)
- continue;
-
- sseu->slice_mask |= BIT(s);
-
- intel_sseu_set_subslices(sseu, s, ss_en);
-
- for (ss = 0; ss < sseu->max_subslices; ss++)
- if (intel_sseu_has_subslice(sseu, s, ss))
- sseu_set_eus(sseu, s, ss, eu_en);
- }
- sseu->eu_per_subslice = hweight16(eu_en);
- sseu->eu_total = compute_eu_total(sseu);
-}
-
-static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- u8 s_en;
- u32 dss_en;
- u16 eu_en = 0;
- u8 eu_en_fuse;
- int eu;
-
- /*
- * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
- * Instead of splitting these, provide userspace with an array
- * of DSS to more closely represent the hardware resource.
- */
- intel_sseu_set_info(sseu, 1, 6, 16);
-
- s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
-
- dss_en = I915_READ(GEN12_GT_DSS_ENABLE);
-
- /* one bit per pair of EUs */
- eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
- for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
- if (eu_en_fuse & BIT(eu))
- eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
-
- gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
-
- /* TGL only supports slice-level power gating */
- sseu->has_slice_pg = 1;
-}
-
-static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- u8 s_en;
- u32 ss_en;
- u8 eu_en;
-
- if (IS_ELKHARTLAKE(dev_priv))
- intel_sseu_set_info(sseu, 1, 4, 8);
- else
- intel_sseu_set_info(sseu, 1, 8, 8);
-
- s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
- ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
- eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
-
- gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
-
- /* ICL has no power gating restrictions. */
- sseu->has_slice_pg = 1;
- sseu->has_subslice_pg = 1;
- sseu->has_eu_pg = 1;
-}
-
-static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- const u32 fuse2 = I915_READ(GEN8_FUSE2);
- int s, ss;
- const int eu_mask = 0xff;
- u32 subslice_mask, eu_en;
-
- intel_sseu_set_info(sseu, 6, 4, 8);
-
- sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
- GEN10_F2_S_ENA_SHIFT;
-
- /* Slice0 */
- eu_en = ~I915_READ(GEN8_EU_DISABLE0);
- for (ss = 0; ss < sseu->max_subslices; ss++)
- sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
- /* Slice1 */
- sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
- eu_en = ~I915_READ(GEN8_EU_DISABLE1);
- sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
- /* Slice2 */
- sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
- sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
- /* Slice3 */
- sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
- eu_en = ~I915_READ(GEN8_EU_DISABLE2);
- sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
- /* Slice4 */
- sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
- sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
- /* Slice5 */
- sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
- eu_en = ~I915_READ(GEN10_EU_DISABLE3);
- sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
-
- subslice_mask = (1 << 4) - 1;
- subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
-
- for (s = 0; s < sseu->max_slices; s++) {
- u32 subslice_mask_with_eus = subslice_mask;
-
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- if (sseu_get_eus(sseu, s, ss) == 0)
- subslice_mask_with_eus &= ~BIT(ss);
- }
-
- /*
- * Slice0 can have up to 3 subslices, but there are only 2 in
- * slice1/2.
- */
- intel_sseu_set_subslices(sseu, s, s == 0 ?
- subslice_mask_with_eus :
- subslice_mask_with_eus & 0x3);
- }
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /*
- * CNL is expected to always have a uniform distribution
- * of EU across subslices with the exception that any one
- * EU in any one subslice may be fused off for die
- * recovery.
- */
- sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
- DIV_ROUND_UP(sseu->eu_total,
- intel_sseu_subslice_total(sseu)) :
- 0;
-
- /* No restrictions on Power Gating */
- sseu->has_slice_pg = 1;
- sseu->has_subslice_pg = 1;
- sseu->has_eu_pg = 1;
-}
-
-static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- u32 fuse;
- u8 subslice_mask = 0;
-
- fuse = I915_READ(CHV_FUSE_GT);
-
- sseu->slice_mask = BIT(0);
- intel_sseu_set_info(sseu, 1, 2, 8);
-
- if (!(fuse & CHV_FGT_DISABLE_SS0)) {
- u8 disabled_mask =
- ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
- CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
- (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
- CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
-
- subslice_mask |= BIT(0);
- sseu_set_eus(sseu, 0, 0, ~disabled_mask);
- }
-
- if (!(fuse & CHV_FGT_DISABLE_SS1)) {
- u8 disabled_mask =
- ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
- CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
- (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
- CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
-
- subslice_mask |= BIT(1);
- sseu_set_eus(sseu, 0, 1, ~disabled_mask);
- }
-
- intel_sseu_set_subslices(sseu, 0, subslice_mask);
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /*
- * CHV expected to always have a uniform distribution of EU
- * across subslices.
- */
- sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
- sseu->eu_total /
- intel_sseu_subslice_total(sseu) :
- 0;
- /*
- * CHV supports subslice power gating on devices with more than
- * one subslice, and supports EU power gating on devices with
- * more than one EU pair per subslice.
- */
- sseu->has_slice_pg = 0;
- sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
- sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
-}
-
-static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- int s, ss;
- u32 fuse2, eu_disable, subslice_mask;
- const u8 eu_mask = 0xff;
-
- fuse2 = I915_READ(GEN8_FUSE2);
- sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-
- /* BXT has a single slice and at most 3 subslices. */
- intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
- IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
-
- /*
- * The subslice disable field is global, i.e. it applies
- * to each of the enabled slices.
- */
- subslice_mask = (1 << sseu->max_subslices) - 1;
- subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
- GEN9_F2_SS_DIS_SHIFT);
-
- /*
- * Iterate through enabled slices and subslices to
- * count the total enabled EU.
- */
- for (s = 0; s < sseu->max_slices; s++) {
- if (!(sseu->slice_mask & BIT(s)))
- /* skip disabled slice */
- continue;
-
- intel_sseu_set_subslices(sseu, s, subslice_mask);
-
- eu_disable = I915_READ(GEN9_EU_DISABLE(s));
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- int eu_per_ss;
- u8 eu_disabled_mask;
-
- if (!intel_sseu_has_subslice(sseu, s, ss))
- /* skip disabled subslice */
- continue;
-
- eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
-
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
-
- eu_per_ss = sseu->max_eus_per_subslice -
- hweight8(eu_disabled_mask);
-
- /*
- * Record which subslice(s) has(have) 7 EUs. we
- * can tune the hash used to spread work among
- * subslices if they are unbalanced.
- */
- if (eu_per_ss == 7)
- sseu->subslice_7eu[s] |= BIT(ss);
- }
- }
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /*
- * SKL is expected to always have a uniform distribution
- * of EU across subslices with the exception that any one
- * EU in any one subslice may be fused off for die
- * recovery. BXT is expected to be perfectly uniform in EU
- * distribution.
- */
- sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
- DIV_ROUND_UP(sseu->eu_total,
- intel_sseu_subslice_total(sseu)) :
- 0;
- /*
- * SKL+ supports slice power gating on devices with more than
- * one slice, and supports EU power gating on devices with
- * more than one EU pair per subslice. BXT+ supports subslice
- * power gating on devices with more than one subslice, and
- * supports EU power gating on devices with more than one EU
- * pair per subslice.
- */
- sseu->has_slice_pg =
- !IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
- sseu->has_subslice_pg =
- IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
- sseu->has_eu_pg = sseu->eu_per_subslice > 2;
-
- if (IS_GEN9_LP(dev_priv)) {
-#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
-
- sseu->min_eu_in_pool = 0;
- if (info->has_pooled_eu) {
- if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
- sseu->min_eu_in_pool = 3;
- else if (IS_SS_DISABLED(1))
- sseu->min_eu_in_pool = 6;
- else
- sseu->min_eu_in_pool = 9;
- }
-#undef IS_SS_DISABLED
- }
-}
-
-static void bdw_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- int s, ss;
- u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
-
- fuse2 = I915_READ(GEN8_FUSE2);
- sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- intel_sseu_set_info(sseu, 3, 3, 8);
-
- /*
- * The subslice disable field is global, i.e. it applies
- * to each of the enabled slices.
- */
- subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
- subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
- GEN8_F2_SS_DIS_SHIFT);
-
- eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
- eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
- ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
- (32 - GEN8_EU_DIS0_S1_SHIFT));
- eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
- ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
- (32 - GEN8_EU_DIS1_S2_SHIFT));
-
- /*
- * Iterate through enabled slices and subslices to
- * count the total enabled EU.
- */
- for (s = 0; s < sseu->max_slices; s++) {
- if (!(sseu->slice_mask & BIT(s)))
- /* skip disabled slice */
- continue;
-
- intel_sseu_set_subslices(sseu, s, subslice_mask);
-
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- u8 eu_disabled_mask;
- u32 n_disabled;
-
- if (!intel_sseu_has_subslice(sseu, s, ss))
- /* skip disabled subslice */
- continue;
-
- eu_disabled_mask =
- eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
-
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
-
- n_disabled = hweight8(eu_disabled_mask);
-
- /*
- * Record which subslices have 7 EUs.
- */
- if (sseu->max_eus_per_subslice - n_disabled == 7)
- sseu->subslice_7eu[s] |= 1 << ss;
- }
- }
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /*
- * BDW is expected to always have a uniform distribution of EU across
- * subslices with the exception that any one EU in any one subslice may
- * be fused off for die recovery.
- */
- sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
- DIV_ROUND_UP(sseu->eu_total,
- intel_sseu_subslice_total(sseu)) :
- 0;
-
- /*
- * BDW supports slice power gating on devices with more than
- * one slice.
- */
- sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
- sseu->has_subslice_pg = 0;
- sseu->has_eu_pg = 0;
-}
-
-static void hsw_sseu_info_init(struct drm_i915_private *dev_priv)
-{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- u32 fuse1;
- u8 subslice_mask = 0;
- int s, ss;
-
- /*
- * There isn't a register to tell us how many slices/subslices. We
- * work off the PCI-ids here.
- */
- switch (INTEL_INFO(dev_priv)->gt) {
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gt);
- /* fall through */
- case 1:
- sseu->slice_mask = BIT(0);
- subslice_mask = BIT(0);
- break;
- case 2:
- sseu->slice_mask = BIT(0);
- subslice_mask = BIT(0) | BIT(1);
- break;
- case 3:
- sseu->slice_mask = BIT(0) | BIT(1);
- subslice_mask = BIT(0) | BIT(1);
- break;
- }
-
- fuse1 = I915_READ(HSW_PAVP_FUSE1);
- switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
- default:
- MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
- HSW_F1_EU_DIS_SHIFT);
- /* fall through */
- case HSW_F1_EU_DIS_10EUS:
- sseu->eu_per_subslice = 10;
- break;
- case HSW_F1_EU_DIS_8EUS:
- sseu->eu_per_subslice = 8;
- break;
- case HSW_F1_EU_DIS_6EUS:
- sseu->eu_per_subslice = 6;
- break;
- }
-
- intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
- hweight8(subslice_mask),
- sseu->eu_per_subslice);
-
- for (s = 0; s < sseu->max_slices; s++) {
- intel_sseu_set_subslices(sseu, s, subslice_mask);
-
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- sseu_set_eus(sseu, s, ss,
- (1UL << sseu->eu_per_subslice) - 1);
- }
- }
-
- sseu->eu_total = compute_eu_total(sseu);
-
- /* No powergating for you. */
- sseu->has_slice_pg = 0;
- sseu->has_subslice_pg = 0;
- sseu->has_eu_pg = 0;
-}
-
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
- u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
+ u32 ts_override = intel_uncore_read(&dev_priv->uncore,
+ GEN9_TIMESTAMP_OVERRIDE);
u32 base_freq, frac_freq;
base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
@@ -736,6 +186,7 @@ static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 f12_5_mhz = 12500000;
u32 f19_2_mhz = 19200000;
u32 f24_mhz = 24000000;
@@ -757,7 +208,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
*/
return f12_5_mhz;
} else if (INTEL_GEN(dev_priv) <= 9) {
- u32 ctc_reg = I915_READ(CTC_MODE);
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
@@ -775,7 +226,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
return freq;
} else if (INTEL_GEN(dev_priv) <= 12) {
- u32 ctc_reg = I915_READ(CTC_MODE);
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
/* First figure out the reference frequency. There are 2 ways
@@ -786,7 +237,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(dev_priv);
} else {
- u32 rpm_config_reg = I915_READ(RPM_CONFIG0);
+ u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);
if (INTEL_GEN(dev_priv) <= 10)
freq = gen10_get_crystal_clock_freq(dev_priv,
@@ -933,7 +384,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (IS_ROCKETLAKE(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ runtime->num_sprites[pipe] = 4;
+ else if (INTEL_GEN(dev_priv) >= 11)
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 6;
else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
@@ -962,8 +416,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
HAS_PCH_SPLIT(dev_priv)) {
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP);
/*
* SFUSE_STRAP is supposed to have a bit signalling the display
@@ -988,7 +442,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
- u32 dfsm = I915_READ(SKL_DFSM);
+ u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
info->pipe_mask &= ~BIT(PIPE_A);
@@ -1022,22 +476,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->display.has_dsc = 0;
}
- /* Initialize slice/subslice/EU info */
- if (IS_HASWELL(dev_priv))
- hsw_sseu_info_init(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_sseu_info_init(dev_priv);
- else if (IS_BROADWELL(dev_priv))
- bdw_sseu_info_init(dev_priv);
- else if (IS_GEN(dev_priv, 9))
- gen9_sseu_info_init(dev_priv);
- else if (IS_GEN(dev_priv, 10))
- gen10_sseu_info_init(dev_priv);
- else if (IS_GEN(dev_priv, 11))
- gen11_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 12)
- gen12_sseu_info_init(dev_priv);
-
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
@@ -1068,67 +506,3 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
yesno(caps->has_logical_contexts));
drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
-
-/*
- * Determine which engines are fused off in our particular hardware. Since the
- * fuse register is in the blitter powerwell, we need forcewake to be ready at
- * this point (but later we need to prune the forcewake domains for engines that
- * are indeed fused off).
- */
-void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
-{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
- unsigned int logical_vdbox = 0;
- unsigned int i;
- u32 media_fuse;
- u16 vdbox_mask;
- u16 vebox_mask;
-
- if (INTEL_GEN(dev_priv) < 11)
- return;
-
- media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
-
- vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
-
- for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i))) {
- vdbox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vdbox_mask)) {
- info->engine_mask &= ~BIT(_VCS(i));
- drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
- continue;
- }
-
- /*
- * In Gen11, only even numbered logical VDBOXes are
- * hooked up to an SFC (Scaler & Format Converter) unit.
- * In TGL each VDBOX has access to an SFC.
- */
- if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
- RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
- }
- drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(dev_priv));
- GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
-
- for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i))) {
- vebox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vebox_mask)) {
- info->engine_mask &= ~BIT(_VECS(i));
- drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
- }
- }
- drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(dev_priv));
- GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
-}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 62e03ffa377e..fd2385457ab6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -73,6 +73,7 @@ enum intel_platform {
INTEL_KABYLAKE,
INTEL_GEMINILAKE,
INTEL_COFFEELAKE,
+ INTEL_COMETLAKE,
/* gen10 */
INTEL_CANNONLAKE,
/* gen11 */
@@ -80,6 +81,8 @@ enum intel_platform {
INTEL_ELKHARTLAKE,
/* gen12 */
INTEL_TIGERLAKE,
+ INTEL_ROCKETLAKE,
+ INTEL_DG1,
INTEL_MAX_PLATFORMS
};
@@ -120,6 +123,7 @@ enum intel_ppgtt_type {
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
+ func(has_master_unit_irq); \
func(has_pooled_eu); \
func(has_rc6); \
func(has_rc6p); \
@@ -146,6 +150,7 @@ enum intel_ppgtt_type {
func(has_modular_fia); \
func(has_overlay); \
func(has_psr); \
+ func(has_psr_hw_tracking); \
func(overlay_needs_physical); \
func(supports_tv);
@@ -154,7 +159,7 @@ struct intel_device_info {
u8 gen;
u8 gt; /* GT number, 0 if undefined */
- intel_engine_mask_t engine_mask; /* Engines supported by the HW */
+ intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
enum intel_platform platform;
@@ -172,6 +177,8 @@ struct intel_device_info {
u8 pipe_mask;
u8 cpu_transcoder_mask;
+ u8 abox_mask;
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
@@ -214,18 +221,10 @@ struct intel_runtime_info {
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
- u8 num_engines;
-
- /* Slice/subslice/EU info */
- struct sseu_dev_info sseu;
-
u32 rawclk_freq;
u32 cs_timestamp_frequency_hz;
u32 cs_timestamp_period_ns;
-
- /* Media engine access to SFC per instance */
- u8 vdbox_sfc_access;
};
struct intel_driver_caps {
@@ -242,10 +241,6 @@ void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p);
void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p);
-void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p);
-
-void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p);
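
Reviewer note (illustrative, not part of the patch): new capability bits such as has_master_unit_irq and has_psr_hw_tracking only need to be appended to DEV_INFO_FOR_EACH_FLAG; the DEFINE_FLAG expansion in this header turns each name into a one-bit field, and every other user of the list picks the new flag up automatically. A reduced stand-alone sketch of that X-macro pattern follows — the demo_* names and the two-entry flag list are mine, only the pattern matches the header above.

#include <stdio.h>

/* Reduced stand-in for DEV_INFO_FOR_EACH_FLAG; the real list lives in the header. */
#define DEMO_FOR_EACH_FLAG(func) \
	func(has_master_unit_irq); \
	func(has_psr_hw_tracking);

struct demo_device_info {
#define DEFINE_FLAG(name) unsigned char name:1
	DEMO_FOR_EACH_FLAG(DEFINE_FLAG)
#undef DEFINE_FLAG
};

static void demo_print_flags(const struct demo_device_info *info)
{
#define PRINT_FLAG(name) printf(#name ": %s\n", info->name ? "yes" : "no")
	DEMO_FOR_EACH_FLAG(PRINT_FLAG)
#undef PRINT_FLAG
}

int main(void)
{
	struct demo_device_info info = { .has_psr_hw_tracking = 1 };

	demo_print_flags(&info);
	return 0;
}
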
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 21b91313cc5d..99fe8aef1c67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -52,6 +52,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_COFFEELAKE(dev_priv))
return true;
+ if (IS_COMETLAKE(dev_priv))
+ return true;
return false;
}
@@ -64,7 +66,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
*/
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
- if (!i915_modparams.enable_gvt)
+ if (!dev_priv->params.enable_gvt)
return;
if (intel_vgpu_active(dev_priv)) {
@@ -80,7 +82,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
return;
bail:
- i915_modparams.enable_gvt = 0;
+ dev_priv->params.enable_gvt = 0;
}
/**
@@ -100,7 +102,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- if (!i915_modparams.enable_gvt) {
+ if (!dev_priv->params.enable_gvt) {
drm_dbg(&dev_priv->drm,
"GVT-g is disabled by kernel params\n");
return 0;
@@ -121,7 +123,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
bail:
- i915_modparams.enable_gvt = 0;
+ dev_priv->params.enable_gvt = 0;
return 0;
}
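
Reviewer note (illustrative, not part of the patch): the i915_modparams → dev_priv->params conversion above gives each device its own copy of the module parameters, so sanitization paths such as bail: can clear enable_gvt for one GPU without mutating the global value shared by other devices. A minimal user-space model of that idea — demo_* names are placeholders, only the copy-then-sanitize-per-device flow mirrors the patch:

#include <stdbool.h>
#include <stdio.h>

/* Global module parameters, set once at load time. */
struct demo_params {
	bool enable_gvt;
};

static const struct demo_params demo_modparams = { .enable_gvt = true };

struct demo_device {
	struct demo_params params;	/* per-device snapshot of the globals */
};

static void demo_device_init(struct demo_device *dev)
{
	dev->params = demo_modparams;	/* copy, never alias, the globals */
}

static void demo_gvt_sanitize(struct demo_device *dev, bool vgpu_active)
{
	/* e.g. host GVT-g support makes no sense when running as a guest */
	if (vgpu_active)
		dev->params.enable_gvt = false;	/* only this device is affected */
}

int main(void)
{
	struct demo_device a, b;

	demo_device_init(&a);
	demo_device_init(&b);
	demo_gvt_sanitize(&a, true);
	demo_gvt_sanitize(&b, false);
	printf("a: %d, b: %d, global: %d\n",
	       a.params.enable_gvt, b.params.enable_gvt,
	       demo_modparams.enable_gvt);
	return 0;
}
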
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 20ab9a5023b5..6c97192e9ca8 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -64,36 +64,49 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ !IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ !IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
/* KBP is SPT compatible */
return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm,
"Found Cannon Lake LP PCH (CNP-LP)\n");
- drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv) &&
+ !IS_ROCKETLAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
@@ -107,7 +120,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
+ !IS_ROCKETLAKE(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
@@ -141,13 +155,15 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_TIGERLAKE(dev_priv))
+ if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
else if (IS_ELKHARTLAKE(dev_priv))
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
else if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ else if (IS_CANNONLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
@@ -172,6 +188,12 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
+ /* DG1 has south engine display on the same PCI device */
+ if (IS_DG1(dev_priv)) {
+ dev_priv->pch_type = PCH_DG1;
+ return;
+ }
+
/*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only
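
Reviewer note (illustrative, not part of the patch): when running virtualized with no ISA bridge to probe, intel_virt_detect_pch() guesses a PCH device id from the platform; the hunks above extend that chain so Rocket Lake shares Tiger Lake's TGP guess and Comet Lake joins the CNP guess. A stripped-down sketch of just the branches visible in this hunk — the enum values are symbolic stand-ins, not the real PCI ids:

#include <stdio.h>

/* Symbolic stand-ins for the INTEL_PCH_*_DEVICE_ID_TYPE defines. */
enum demo_pch_id { DEMO_PCH_TGP, DEMO_PCH_MCC, DEMO_PCH_ICP, DEMO_PCH_CNP, DEMO_PCH_SPT, DEMO_PCH_NONE };

enum demo_platform { TIGERLAKE, ROCKETLAKE, ELKHARTLAKE, ICELAKE,
		     CANNONLAKE, COFFEELAKE, COMETLAKE, KABYLAKE, SKYLAKE, OTHER };

/* Mirrors the guess chain shown in the hunk for these platforms only. */
static enum demo_pch_id demo_virt_detect_pch(enum demo_platform p)
{
	if (p == TIGERLAKE || p == ROCKETLAKE)
		return DEMO_PCH_TGP;
	if (p == ELKHARTLAKE)
		return DEMO_PCH_MCC;
	if (p == ICELAKE)
		return DEMO_PCH_ICP;
	if (p == CANNONLAKE || p == COFFEELAKE || p == COMETLAKE)
		return DEMO_PCH_CNP;
	if (p == KABYLAKE || p == SKYLAKE)
		return DEMO_PCH_SPT;
	return DEMO_PCH_NONE;	/* older platforms handled further down, not shown here */
}

int main(void)
{
	printf("RKL guess: %d, CML guess: %d\n",
	       demo_virt_detect_pch(ROCKETLAKE), demo_virt_detect_pch(COMETLAKE));
	return 0;
}
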
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 3053d1ce398b..06d2cd50af0b 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -26,6 +26,9 @@ enum intel_pch {
PCH_JSP, /* Jasper Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
PCH_TGP, /* Tiger Lake PCH */
+
+ /* Fake PCHs, functionality handled on the same PCI dev */
+ PCH_DG1 = 1024,
};
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
@@ -56,6 +59,7 @@ enum intel_pch {
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 07f663cd2d1c..cfabbe0481ab 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -33,6 +33,7 @@
#include <drm/drm_plane_helper.h>
#include "display/intel_atomic.h"
+#include "display/intel_bw.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
@@ -43,7 +44,6 @@
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
-#include "display/intel_bw.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
@@ -94,16 +94,13 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
- /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
- /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
+ /*
+ * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
+ * Display WA #0859: skl,bxt,kbl,glk,cfl
+ */
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
- DISP_FBC_WM_DIS |
DISP_FBC_MEMORY_WAKE);
- /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
- ILK_DPFC_DISABLE_DUMMY0);
-
if (IS_SKYLAKE(dev_priv)) {
/* WaDisableDopClockGating */
I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
@@ -140,6 +137,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
* application, using batch buffers or any other means.
*/
I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+
+ /*
+ * WaFbcTurnOffFbcWatermark:bxt
+ * Display WA #0562: bxt
+ */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS);
+
+ /*
+ * WaFbcHighMemBwCorruptionAvoidance:bxt
+ * Display WA #0883: bxt
+ */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -1345,6 +1356,23 @@ static void g4x_invalidate_wms(struct intel_crtc *crtc,
}
}
+static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
+ int level)
+{
+ if (level < G4X_WM_LEVEL_SR)
+ return false;
+
+ if (level >= G4X_WM_LEVEL_SR &&
+ wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
+ return false;
+
+ if (level >= G4X_WM_LEVEL_HPLL &&
+ wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
+ return false;
+
+ return true;
+}
+
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1384,7 +1412,6 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
wm_state->wm.plane[plane_id] = raw->plane[plane_id];
level = G4X_WM_LEVEL_SR;
-
if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
goto out;
@@ -1396,7 +1423,6 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);
level = G4X_WM_LEVEL_HPLL;
-
if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
goto out;
@@ -1419,17 +1445,11 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
/*
* Determine if the FBC watermark(s) can be used. IF
* this isn't the case we prefer to disable the FBC
- ( watermark(s) rather than disable the SR/HPLL
- * level(s) entirely.
+ * watermark(s) rather than disable the SR/HPLL
+ * level(s) entirely. 'level-1' is the highest valid
+ * level here.
*/
- wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;
-
- if (level >= G4X_WM_LEVEL_SR &&
- wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
- wm_state->fbc_en = false;
- else if (level >= G4X_WM_LEVEL_HPLL &&
- wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
- wm_state->fbc_en = false;
+ wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
return 0;
}
@@ -1437,6 +1457,7 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
@@ -1465,8 +1486,8 @@ static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
max(optimal->wm.plane[plane_id],
active->wm.plane[plane_id]);
- WARN_ON(intermediate->wm.plane[plane_id] >
- g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
+ drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
}
intermediate->sr.plane = max(optimal->sr.plane,
@@ -1483,21 +1504,25 @@ static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
intermediate->hpll.fbc = max(optimal->hpll.fbc,
active->hpll.fbc);
- WARN_ON((intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
- intermediate->cxsr);
- WARN_ON((intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
- intermediate->hpll_en);
-
- WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
- intermediate->fbc_en && intermediate->cxsr);
- WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
- intermediate->fbc_en && intermediate->hpll_en);
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
+ intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
+ intermediate->hpll_en);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
+ intermediate->fbc_en && intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
+ intermediate->fbc_en && intermediate->hpll_en);
out:
/*
@@ -1681,6 +1706,7 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1749,11 +1775,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
fifo_left -= plane_extra;
}
- WARN_ON(active_planes != 0 && fifo_left != 0);
+ drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
/* give it all to the first plane if none are active */
if (active_planes == 0) {
- WARN_ON(fifo_left != fifo_size);
+ drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
fifo_state->plane[PLANE_PRIMARY] = fifo_left;
}
@@ -4025,10 +4051,9 @@ icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
return offset;
}
-static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
+u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
{
u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
-
drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
if (INTEL_GEN(dev_priv) < 11)
@@ -4037,10 +4062,38 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
return ddb_size;
}
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
+ const struct skl_ddb_entry *entry)
+{
+ u32 slice_mask = 0;
+ u16 ddb_size = intel_get_ddb_size(dev_priv);
+ u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ u16 slice_size = ddb_size / num_supported_slices;
+ u16 start_slice;
+ u16 end_slice;
+
+ if (!skl_ddb_entry_size(entry))
+ return 0;
+
+ start_slice = entry->start / slice_size;
+ end_slice = (entry->end - 1) / slice_size;
+
+ /*
+ * A per-plane DDB entry can in the worst case span multiple slices,
+ * but a single entry is always contiguous.
+ */
+ while (start_slice <= end_slice) {
+ slice_mask |= BIT(start_slice);
+ start_slice++;
+ }
+
+ return slice_mask;
+}
+
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
u8 active_pipes);
-static void
+static int
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
@@ -4053,30 +4106,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc *crtc;
u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(intel_state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(intel_state);
+ u8 active_pipes = new_dbuf_state->active_pipes;
u16 ddb_size;
u32 ddb_range_size;
u32 i;
u32 dbuf_slice_mask;
- u32 active_pipes;
u32 offset;
u32 slice_size;
u32 total_slice_mask;
u32 start, end;
+ int ret;
+
+ *num_active = hweight8(active_pipes);
- if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
+ if (!crtc_state->hw.active) {
alloc->start = 0;
alloc->end = 0;
- *num_active = hweight8(dev_priv->active_pipes);
- return;
+ return 0;
}
- if (intel_state->active_pipe_changes)
- active_pipes = intel_state->active_pipes;
- else
- active_pipes = dev_priv->active_pipes;
-
- *num_active = hweight8(active_pipes);
-
ddb_size = intel_get_ddb_size(dev_priv);
slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
@@ -4089,13 +4141,16 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* that changes the active CRTC list or do modeset would need to
* grab _all_ crtc locks, including the one we currently hold.
*/
- if (!intel_state->active_pipe_changes && !intel_state->modeset) {
+ if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
+ !dev_priv->wm.distrust_bios_wm) {
/*
* alloc may be cleared by clear_intel_crtc_state,
* copy from old state to be sure
+ *
+ * FIXME get rid of this mess
*/
*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
- return;
+ return 0;
}
/*
@@ -4103,10 +4158,6 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
*/
dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
- DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
- dbuf_slice_mask,
- pipe_name(for_pipe), active_pipes);
-
/*
* Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
* and slice size is 1024, the offset would be 1024
@@ -4174,7 +4225,13 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* FIXME: For now we always enable slice S1 as per
* the Bspec display initialization sequence.
*/
- intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
+ new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
end = ddb_range_size *
@@ -4183,11 +4240,12 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
alloc->start = offset + start;
alloc->end = offset + end;
- DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
- alloc->start, alloc->end);
- DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
- intel_state->enabled_dbuf_slices_mask,
- INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+ for_crtc->base.id, for_crtc->name,
+ dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
+
+ return 0;
}
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4308,12 +4366,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
intel_display_power_put(dev_priv, power_domain, wakeref);
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
-{
- dev_priv->enabled_dbuf_slices_mask =
- intel_enabled_dbuf_slices_mask(dev_priv);
-}
-
/*
* Determines the downscale amount of a plane for the purposes of watermark calculations.
* The bspec defines downscale amount as:
@@ -4334,11 +4386,13 @@ static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
- if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state)))
return u32_to_fixed16(0);
/*
@@ -4606,7 +4660,7 @@ static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
* For anything else just return one slice yet.
* Should be extended for other platforms.
*/
- return BIT(DBUF_S1);
+ return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
}
static u64
@@ -4758,12 +4812,37 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
u32 blocks;
int level;
+ int ret;
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
if (!crtc_state->hw.active) {
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ /*
+ * FIXME hack to make sure we compute this sensibly when
+ * turning off all the pipes. Otherwise we leave it at
+ * whatever we had previously, and then runtime PM will
+ * mess it up by turning off all but S1. Remove this
+ * once the dbuf state computation flow becomes sane.
+ */
+ if (new_dbuf_state->active_pipes == 0) {
+ new_dbuf_state->enabled_slices = BIT(DBUF_S1);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+ }
+
alloc->start = alloc->end = 0;
return 0;
}
@@ -4778,8 +4857,12 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
plane_data_rate,
uv_plane_data_rate);
- skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
- alloc, &num_active);
+ ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
+ total_data_rate,
+ alloc, &num_active);
+ if (ret)
+ return ret;
+
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -5003,6 +5086,7 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
@@ -5012,7 +5096,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
pixel_rate = crtc_state->pixel_rate;
- if (WARN_ON(pixel_rate == 0))
+ if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
return u32_to_fixed16(0);
crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
@@ -5025,11 +5109,13 @@ static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state)))
return 0;
/*
@@ -5190,7 +5276,9 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
+ if ((IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;
@@ -5465,6 +5553,7 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
int ret;
@@ -5476,9 +5565,10 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
- WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
- WARN_ON(!fb->format->is_yuv ||
- fb->format->num_planes == 1);
+ drm_WARN_ON(&dev_priv->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state));
+ drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
+ fb->format->num_planes == 1);
ret = skl_build_plane_wm_single(crtc_state, plane_state,
y_plane_id, 0);
@@ -5701,13 +5791,13 @@ static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state;
+ const struct intel_dbuf_state *old_dbuf_state;
+ const struct intel_dbuf_state *new_dbuf_state;
+ const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
-
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_allocate_pipe_ddb(new_crtc_state);
@@ -5720,6 +5810,17 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+
+ if (new_dbuf_state &&
+ new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
+ drm_dbg_kms(&dev_priv->drm,
+ "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
+ old_dbuf_state->enabled_slices,
+ new_dbuf_state->enabled_slices,
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+
return 0;
}
@@ -5855,7 +5956,8 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int intel_add_all_pipes(struct intel_atomic_state *state)
+static int intel_add_affected_pipes(struct intel_atomic_state *state,
+ u8 pipe_mask)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -5863,6 +5965,9 @@ static int intel_add_all_pipes(struct intel_atomic_state *state)
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state;
+ if ((pipe_mask & BIT(crtc->pipe)) == 0)
+ continue;
+
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -5875,49 +5980,54 @@ static int
skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int ret;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
- /*
- * If this is our first atomic update following hardware readout,
- * we can't trust the DDB that the BIOS programmed for us. Let's
- * pretend that all pipes switched active status so that we'll
- * ensure a full DDB recompute.
- */
if (dev_priv->wm.distrust_bios_wm) {
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
- state->base.acquire_ctx);
+ /*
+ * skl_ddb_get_pipe_allocation_limits() currently requires
+ * all active pipes to be included in the state so that
+ * it can redistribute the dbuf among them, and it really
+ * wants to recompute things when distrust_bios_wm is set
+ * so we add all the pipes to the state.
+ */
+ ret = intel_add_affected_pipes(state, ~0);
if (ret)
return ret;
+ }
- state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_dbuf_state *new_dbuf_state;
+ const struct intel_dbuf_state *old_dbuf_state;
+
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
+ break;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
/*
- * We usually only initialize state->active_pipes if we
- * we're doing a modeset; make sure this field is always
- * initialized during the sanitization process that happens
- * on the first commit too.
+ * skl_ddb_get_pipe_allocation_limits() currently requires
+ * all active pipes to be included in the state so that
+ * it can redistribute the dbuf among them.
*/
- if (!state->modeset)
- state->active_pipes = dev_priv->active_pipes;
- }
-
- /*
- * If the modeset changes which CRTC's are active, we need to
- * recompute the DDB allocation for *all* active pipes, even
- * those that weren't otherwise being modified in any way by this
- * atomic commit. Due to the shrinking of the per-pipe allocations
- * when new active CRTC's are added, it's possible for a pipe that
- * we were already using and aren't changing at all here to suddenly
- * become invalid if its DDB needs exceeds its new allocation.
- *
- * Note that if we wind up doing a full DDB recompute, we can't let
- * any other display updates race with this transaction, so we need
- * to grab the lock on *all* CRTC's.
- */
- if (state->active_pipe_changes || state->modeset) {
- ret = intel_add_all_pipes(state);
+ ret = intel_add_affected_pipes(state,
+ new_dbuf_state->active_pipes);
if (ret)
return ret;
+
+ break;
}
return 0;
@@ -6163,7 +6273,6 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- skl_ddb_get_hw_state(dev_priv);
for_each_intel_crtc(&dev_priv->drm, crtc) {
crtc_state = to_intel_crtc_state(crtc->base.state);
@@ -6735,7 +6844,9 @@ static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
return false;
/* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
return dev_priv->dram_info.symmetric_memory;
return true;
@@ -6998,6 +7109,10 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ /* Wa_1409120013:icl,ehl */
+ I915_WRITE(ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
/* This is not an Wa. Enable to reduce Sampler power */
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
@@ -7012,9 +7127,13 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
+ /* Wa_1409120013:tgl */
+ I915_WRITE(ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
- if (HAS_ENGINE(dev_priv, _VCS(i)))
+ if (HAS_ENGINE(&dev_priv->gt, _VCS(i)))
vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
VDN_MFX_POWERGATE_ENABLE(i);
}
@@ -7055,7 +7174,10 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
- /* WaFbcWakeMemOn:cnl */
+ /*
+ * WaFbcWakeMemOn:cnl
+ * Display WA #0859: cnl
+ */
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
@@ -7081,7 +7203,17 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
cnp_init_clock_gating(dev_priv);
gen9_init_clock_gating(dev_priv);
- /* WaFbcNukeOnHostModify:cfl */
+ /*
+ * WaFbcTurnOffFbcWatermark:cfl
+ * Display WA #0562: cfl
+ */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS);
+
+ /*
+ * WaFbcNukeOnHostModify:cfl
+ * Display WA #0873: cfl
+ */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7100,7 +7232,17 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
- /* WaFbcNukeOnHostModify:kbl */
+ /*
+ * WaFbcTurnOffFbcWatermark:kbl
+ * Display WA #0562: kbl
+ */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS);
+
+ /*
+ * WaFbcNukeOnHostModify:kbl
+ * Display WA #0873: kbl
+ */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7113,15 +7255,37 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
- /* WaFbcNukeOnHostModify:skl */
+ /*
+ * WaFbcTurnOffFbcWatermark:skl
+ * Display WA #0562: skl
+ */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS);
+
+ /*
+ * WaFbcNukeOnHostModify:skl
+ * Display WA #0873: skl
+ */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+
+ /*
+ * WaFbcHighMemBwCorruptionAvoidance:skl
+ * Display WA #0883: skl
+ */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
}
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
+ I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ HSW_FBCQ_DIS);
+
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7169,6 +7333,11 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
+ I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
+ I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ HSW_FBCQ_DIS);
+
/* This is required by WaCatErrorRejectionIssue:hsw */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -7186,6 +7355,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+
/* WaDisableBackToBackFlipFix:ivb */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
@@ -7371,6 +7545,16 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
+
+ /*
+ * Have FBC ignore 3D activity since we use software
+ * render tracking, and otherwise a pure 3D workload
+ * (even if it just renders a single frame and then does
+ * absolutely nothing) would not allow FBC to recompress
+ * until a 2D blit occurs.
+ */
+ I915_WRITE(SCPD0,
+ _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7414,7 +7598,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
- else if (IS_COFFEELAKE(dev_priv))
+ else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
dev_priv->display.init_clock_gating = cfl_init_clock_gating;
else if (IS_SKYLAKE(dev_priv))
dev_priv->display.init_clock_gating = skl_init_clock_gating;
@@ -7544,3 +7728,89 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
+
+static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return NULL;
+
+ return &dbuf_state->base;
+}
+
+static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_funcs = {
+ .atomic_duplicate_state = intel_dbuf_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_destroy_state,
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
+ if (IS_ERR(dbuf_state))
+ return ERR_CAST(dbuf_state);
+
+ return to_intel_dbuf_state(dbuf_state);
+}
+
+int intel_dbuf_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
+ &dbuf_state->base, &intel_dbuf_funcs);
+
+ return 0;
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(dev_priv,
+ old_dbuf_state->enabled_slices |
+ new_dbuf_state->enabled_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(dev_priv,
+ new_dbuf_state->enabled_slices);
+}
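
Reviewer note (illustrative, not part of the patch): the new skl_ddb_dbuf_slice_mask() added above maps a contiguous DDB allocation onto the set of DBuf slices it touches by dividing the DDB into equal slice-sized windows and setting one bit per window the entry overlaps. A stand-alone model of the same arithmetic — the demo_* names and the 2048-block / 2-slice sizes are made up for demonstration:

#include <stdint.h>
#include <stdio.h>

struct demo_ddb_entry {
	uint16_t start, end;	/* range of DDB blocks, end exclusive */
};

/* Same arithmetic as skl_ddb_dbuf_slice_mask(): one bit per DBuf slice spanned. */
static uint32_t demo_dbuf_slice_mask(const struct demo_ddb_entry *entry,
				     uint16_t ddb_size, uint16_t num_slices)
{
	uint16_t slice_size = ddb_size / num_slices;
	uint32_t mask = 0;
	uint16_t s;

	if (entry->end <= entry->start)	/* an empty allocation touches no slice */
		return 0;

	for (s = entry->start / slice_size; s <= (entry->end - 1) / slice_size; s++)
		mask |= 1u << s;

	return mask;
}

int main(void)
{
	/* Hypothetical 2048-block DDB split into 2 slices of 1024 blocks each. */
	struct demo_ddb_entry a = { .start = 0, .end = 512 };		/* slice 0 only */
	struct demo_ddb_entry b = { .start = 512, .end = 1536 };	/* spans both slices */

	printf("a: 0x%x, b: 0x%x\n",
	       demo_dbuf_slice_mask(&a, 2048, 2),
	       demo_dbuf_slice_mask(&b, 2048, 2));
	return 0;
}
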
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 614ac7f8d4cc..a2473594c2db 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,8 +8,10 @@
#include <linux/types.h>
-#include "i915_reg.h"
#include "display/intel_bw.h"
+#include "display/intel_global_state.h"
+
+#include "i915_reg.h"
struct drm_device;
struct drm_i915_private;
@@ -38,6 +40,9 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
+u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
+ const struct skl_ddb_entry *entry);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
@@ -63,4 +68,26 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ u8 enabled_slices;
+ u8 active_pipes;
+};
+
+int intel_dbuf_init(struct drm_i915_private *dev_priv);
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
+
+#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
+
+int intel_dbuf_init(struct drm_i915_private *dev_priv);
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
+
#endif /* __INTEL_PM_H__ */
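
Reviewer note (illustrative, not part of the patch): the intel_dbuf_pre_plane_update()/intel_dbuf_post_plane_update() pair declared above brackets the plane update so that, while planes are being reprogrammed, the union of the old and new slice masks stays powered, and only afterwards is the set trimmed down to the new mask. A small stand-alone model of that ordering — demo_* names stand in for the real helpers:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for gen9_dbuf_slices_update(): just reports the requested mask. */
static void demo_slices_update(uint8_t mask)
{
	printf("power dbuf slices 0x%02x\n", mask);
}

static void demo_pre_plane_update(uint8_t old_slices, uint8_t new_slices)
{
	if (old_slices == new_slices)
		return;
	/* keep every slice either state needs while planes are moved over */
	demo_slices_update(old_slices | new_slices);
}

static void demo_post_plane_update(uint8_t old_slices, uint8_t new_slices)
{
	if (old_slices == new_slices)
		return;
	/* planes now only use the new allocation, so drop the surplus slices */
	demo_slices_update(new_slices);
}

int main(void)
{
	uint8_t old_slices = 0x1, new_slices = 0x3;

	demo_pre_plane_update(old_slices, new_slices);
	/* ... per-plane DDB reprogramming would happen here ... */
	demo_post_plane_update(old_slices, new_slices);
	return 0;
}
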
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index 14b59b899c9b..40d8f1a95df6 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -76,7 +76,7 @@ region_lmem_init(struct intel_memory_region *mem)
{
int ret;
- if (i915_modparams.fake_lmem_start) {
+ if (mem->i915->params.fake_lmem_start) {
ret = init_fake_lmem_bar(mem);
GEM_BUG_ON(ret);
}
@@ -111,12 +111,12 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
resource_size_t start;
GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
- GEM_BUG_ON(!i915_modparams.fake_lmem_start);
+ GEM_BUG_ON(!i915->params.fake_lmem_start);
/* Your mappable aperture belongs to me now! */
mappable_end = pci_resource_len(pdev, 2);
io_start = pci_resource_start(pdev, 2),
- start = i915_modparams.fake_lmem_start;
+ start = i915->params.fake_lmem_start;
mem = intel_memory_region_create(i915,
start,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 9cb2d7548daa..153ca9e65382 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -116,6 +116,9 @@ track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
depot_stack_handle_t stack)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
unsigned long flags, n;
bool found = false;
@@ -134,9 +137,9 @@ static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
}
spin_unlock_irqrestore(&rpm->debug.lock, flags);
- if (WARN(!found,
- "Unmatched wakeref (tracking %lu), count %u\n",
- rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+ if (drm_WARN(&i915->drm, !found,
+ "Unmatched wakeref (tracking %lu), count %u\n",
+ rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
@@ -355,10 +358,14 @@ intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
bool wakelock)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
int ret;
ret = pm_runtime_get_sync(rpm->kdev);
- WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
+ drm_WARN_ONCE(&i915->drm, ret < 0,
+ "pm_runtime_get_sync() failed: %d\n", ret);
intel_runtime_pm_acquire(rpm, wakelock);
@@ -539,6 +546,9 @@ void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
*/
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
struct device *kdev = rpm->kdev;
/*
@@ -565,7 +575,8 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
pm_runtime_dont_use_autosuspend(kdev);
ret = pm_runtime_get_sync(kdev);
- WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
+ drm_WARN(&i915->drm, ret < 0,
+ "pm_runtime_get_sync() failed: %d\n", ret);
} else {
pm_runtime_use_autosuspend(kdev);
}
@@ -580,11 +591,14 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
struct device *kdev = rpm->kdev;
/* Transfer rpm ownership back to core */
- WARN(pm_runtime_get_sync(kdev) < 0,
- "Failed to pass rpm ownership back to core\n");
+ drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
+ "Failed to pass rpm ownership back to core\n");
pm_runtime_dont_use_autosuspend(kdev);
@@ -594,12 +608,15 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
+ struct drm_i915_private *i915 = container_of(rpm,
+ struct drm_i915_private,
+ runtime_pm);
int count = atomic_read(&rpm->wakeref_count);
- WARN(count,
- "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
- intel_rpm_raw_wakeref_count(count),
- intel_rpm_wakelock_count(count));
+ drm_WARN(&i915->drm, count,
+ "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
+ intel_rpm_raw_wakeref_count(count),
+ intel_rpm_wakelock_count(count));
untrack_all_intel_runtime_pm_wakerefs(rpm);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a61cb8ca4d50..8d5a933e6af6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -142,7 +142,7 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
- add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+ add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}
}
@@ -219,7 +219,7 @@ fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
- add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+ add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}
}
@@ -709,7 +709,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
continue;
}
- fw_domain_arm_timer(domain);
+ uncore->funcs.force_wake_put(uncore, domain->mask);
}
}
@@ -1185,7 +1185,7 @@ __unclaimed_reg_debug(struct intel_uncore *uncore,
read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
/* Only report the first N failures */
- i915_modparams.mmio_debug--;
+ uncore->i915->params.mmio_debug--;
}
static inline void
@@ -1194,7 +1194,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
const bool read,
const bool before)
{
- if (likely(!i915_modparams.mmio_debug))
+ if (likely(!uncore->i915->params.mmio_debug))
return;
/* interrupts are disabled and re-enabled around uncore->lock usage */
@@ -1529,6 +1529,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
if (INTEL_GEN(i915) >= 11) {
+ /* we'll prune the domains of missing engines later */
+ intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
int i;
uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
@@ -1541,7 +1543,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_BLITTER_GEN9);
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(i915, _VCS(i)))
+ if (!__HAS_ENGINE(emask, _VCS(i)))
continue;
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
@@ -1549,7 +1551,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
}
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(i915, _VECS(i)))
+ if (!__HAS_ENGINE(emask, _VECS(i)))
continue;
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
@@ -1844,20 +1846,20 @@ out_mmio_cleanup:
* the forcewake domains. Prune them, to make sure they only reference existing
* engines.
*/
-void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
+void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
+ struct intel_gt *gt)
{
- struct drm_i915_private *i915 = uncore->i915;
enum forcewake_domains fw_domains = uncore->fw_domains;
enum forcewake_domain_id domain_id;
int i;
- if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
+ if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11)
return;
for (i = 0; i < I915_MAX_VCS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
- if (HAS_ENGINE(i915, _VCS(i)))
+ if (HAS_ENGINE(gt, _VCS(i)))
continue;
if (fw_domains & BIT(domain_id))
@@ -1867,7 +1869,7 @@ void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
for (i = 0; i < I915_MAX_VECS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
- if (HAS_ENGINE(i915, _VECS(i)))
+ if (HAS_ENGINE(gt, _VECS(i)))
continue;
if (fw_domains & BIT(domain_id))
@@ -1991,7 +1993,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
unsigned int slow_timeout_ms,
u32 *out_value)
{
- u32 uninitialized_var(reg_value);
+ u32 reg_value;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
int ret;
@@ -2093,12 +2095,12 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
goto out;
if (unlikely(check_for_unclaimed_mmio(uncore))) {
- if (!i915_modparams.mmio_debug) {
+ if (!uncore->i915->params.mmio_debug) {
drm_dbg(&uncore->i915->drm,
"Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
- i915_modparams.mmio_debug++;
+ uncore->i915->params.mmio_debug++;
}
uncore->debug->unclaimed_mmio_check--;
ret = true;
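The forcewake hunks above replace the device-wide HAS_ENGINE() check with a raw __HAS_ENGINE() test on the platform engine mask (and, in the pruning path, a per-gt mask). Both boil down to one bit per engine instance. A trivial standalone sketch of that mask test, with made-up bit positions rather than the driver's real engine numbering:

        #include <stdio.h>

        #define FAKE_VCS(n)             (2 + (n))       /* illustrative bit positions */
        #define MASK_HAS(mask, id)      (!!((mask) & (1u << (id))))

        int main(void)
        {
                unsigned int engine_mask = (1u << FAKE_VCS(0)) | (1u << FAKE_VCS(2));
                int i;

                for (i = 0; i < 4; i++)
                        printf("vcs%d present: %s\n", i,
                               MASK_HAS(engine_mask, FAKE_VCS(i)) ? "yes" : "no");
                return 0;
        }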
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 8d3aa8b9acf9..c4b22d9d0b45 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -35,6 +35,7 @@
struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
+struct intel_gt;
struct intel_uncore_mmio_debug {
spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -186,7 +187,8 @@ intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
-void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
+void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
+ struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 2e471500a646..0016ffc7d914 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -97,6 +97,7 @@ static void fake_put_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops fake_ops = {
+ .name = "fake-gem",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = fake_get_pages,
.put_pages = fake_put_pages,
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 5dd5d81646c4..a92c0e9b7e6b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,9 +11,9 @@
* a module parameter. It must be unique and legal for a C identifier.
*
* The function should be of type int function(void). It may be conditionally
- * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
*
- * Tests are executed in order by igt/drv_selftest
+ * Tests are executed in order by igt/i915_selftest
*/
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 6090ce35226b..3db34d3eea58 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -11,9 +11,9 @@
* a module parameter. It must be unique and legal for a C identifier.
*
* The function should be of type int function(void). It may be conditionally
- * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
*
- * Tests are executed in order by igt/drv_selftest
+ * Tests are executed in order by igt/i915_selftest
*/
selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
selftest(shmem, shmem_utils_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index 8eb3108f1767..c2d001d9c0ec 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -162,7 +162,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
len = 5;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(rq->engine->i915) >= 8)
len++;
*cs++ = GFX_OP_PIPE_CONTROL(len);
@@ -280,11 +280,144 @@ out:
return err;
}
+static int live_noa_gpr(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 *cs, *store;
+ void *scratch;
+ u32 gpr0;
+ int err;
+ int i;
+
+ /* Check that the delay does not clobber user context state (GPR) */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -ENOMEM;
+
+ gpr0 = i915_mmio_reg_offset(GEN8_RING_CS_GPR(stream->engine->mmio_base, 0));
+
+ ce = intel_context_create(stream->engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ /* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
+ scratch = kmap(ce->vm->scratch[0].base.page);
+ memset(scratch, POISON_FREE, PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+ i915_request_get(rq);
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out_rq;
+ }
+ }
+
+ /* Fill the 16 qword [32 dword] GPR with a known unlikely value */
+ cs = intel_ring_begin(rq, 2 * 32 + 2);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(32);
+ for (i = 0; i < 32; i++) {
+ *cs++ = gpr0 + i * sizeof(u32);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Execute the GPU delay */
+ err = rq->engine->emit_bb_start(rq,
+ i915_ggtt_offset(stream->noa_wait), 0,
+ I915_DISPATCH_SECURE);
+ if (err) {
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ /* Read the GPR back, using the pinned global HWSP for convenience */
+ store = memset32(rq->engine->status_page.addr + 512, 0, 32);
+ for (i = 0; i < 32; i++) {
+ u32 cmd;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ cmd = MI_STORE_REGISTER_MEM;
+ if (INTEL_GEN(i915) >= 8)
+ cmd++;
+ cmd |= MI_USE_GGTT;
+
+ *cs++ = cmd;
+ *cs++ = gpr0 + i * sizeof(u32);
+ *cs++ = i915_ggtt_offset(rq->engine->status_page.vma) +
+ offset_in_page(store) +
+ i * sizeof(u32);
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+ }
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ / 2) < 0) {
+ pr_err("noa_wait timed out\n");
+ intel_gt_set_wedged(stream->engine->gt);
+ err = -EIO;
+ goto out_rq;
+ }
+
+ /* Verify that the GPRs contain our expected values */

+ for (i = 0; i < 32; i++) {
+ if (store[i] == STACK_MAGIC)
+ continue;
+
+ pr_err("GPR[%d] lost, found:%08x, expected:%08x!\n",
+ i, store[i], STACK_MAGIC);
+ err = -EINVAL;
+ }
+
+ /* Verify that the user's scratch page was not used for GPR storage */
+ if (memchr_inv(scratch, POISON_FREE, PAGE_SIZE)) {
+ pr_err("Scratch page overwritten!\n");
+ igt_hexdump(scratch, 4096);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ kunmap(ce->vm->scratch[0].base.page);
+ intel_context_put(ce);
+out:
+ stream_destroy(stream);
+ return err;
+}
+
int i915_perf_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_noa_delay),
+ SUBTEST(live_noa_gpr),
};
struct i915_perf *perf = &i915->perf;
int err;
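live_noa_gpr() above guards against the NOA delay clobbering user state in two ways: it preloads the GPRs with STACK_MAGIC and reads them back via MI_STORE_REGISTER_MEM, and it poisons the context's scratch page and then checks that nothing wrote to it (memchr_inv()). A standalone sketch of the poison-and-check half, using a local stand-in for the kernel's memchr_inv() so it runs outside the kernel:

        #include <stdio.h>
        #include <string.h>

        #define POISON_FREE 0x6b        /* same poison byte the kernel uses */
        #define PAGE_SIZE   4096

        /* Return a pointer to the first byte differing from 'c', or NULL. */
        static const void *memchr_inv_local(const void *p, int c, size_t n)
        {
                const unsigned char *s = p;
                size_t i;

                for (i = 0; i < n; i++)
                        if (s[i] != (unsigned char)c)
                                return s + i;
                return NULL;
        }

        int main(void)
        {
                unsigned char scratch[PAGE_SIZE];

                memset(scratch, POISON_FREE, sizeof(scratch));

                /* ... the operation under test would run here ... */

                if (memchr_inv_local(scratch, POISON_FREE, sizeof(scratch)))
                        puts("scratch overwritten!");
                else
                        puts("scratch untouched");
                return 0;
        }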
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index d8da142985eb..c2389f8a257d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -11,7 +11,7 @@
* a module parameter. It must be unique and legal for a C identifier.
*
* The function should be of type int function(void). It may be conditionally
- * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
*
* Tests are executed in order by igt/i915_selftest
*/
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6014e8dfcbb1..57dd6f5122ee 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -24,16 +24,21 @@
#include <linux/prime_numbers.h>
#include <linux/pm_qos.h>
+#include <linux/sort.h>
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/selftest_engine_heartbeat.h"
#include "i915_random.h"
#include "i915_selftest.h"
+#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "lib_sw_fence.h"
@@ -1449,7 +1454,7 @@ out_flush:
idx++;
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
- num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
+ num_waits, num_fences, idx, ncpus);
ret = igt_live_test_end(&live) ?: ret;
out_contexts:
@@ -1524,6 +1529,808 @@ struct perf_series {
struct intel_context *ce[];
};
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ return *a - *b;
+}
+
+static u32 trifilter(u32 *a)
+{
+ u64 sum;
+
+#define TF_COUNT 5
+ sort(a, TF_COUNT, sizeof(*a), cmp_u32, NULL);
+
+ sum = mul_u32_u32(a[2], 2);
+ sum += a[1];
+ sum += a[3];
+
+ GEM_BUG_ON(sum > U32_MAX);
+ return sum;
+#define TF_BIAS 2
+}
+
+static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
+{
+ u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+
+ return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
+}
+
+static u32 *emit_timestamp_store(u32 *cs, struct intel_context *ce, u32 offset)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP((ce->engine->mmio_base)));
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_store_dw(u32 *cs, u32 offset, u32 value)
+{
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static u32 *emit_semaphore_poll(u32 *cs, u32 mode, u32 value, u32 offset)
+{
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ mode;
+ *cs++ = value;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_semaphore_poll_until(u32 *cs, u32 offset, u32 value)
+{
+ return emit_semaphore_poll(cs, MI_SEMAPHORE_SAD_EQ_SDD, value, offset);
+}
+
+static void semaphore_set(u32 *sema, u32 value)
+{
+ WRITE_ONCE(*sema, value);
+ wmb(); /* flush the update to the cache, and beyond */
+}
+
+static u32 *hwsp_scratch(const struct intel_context *ce)
+{
+ return memset32(ce->engine->status_page.addr + 1000, 0, 21);
+}
+
+static u32 hwsp_offset(const struct intel_context *ce, u32 *dw)
+{
+ return (i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(dw));
+}
+
+static int measure_semaphore_response(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how many cycles it takes for the HW to detect the change
+ * in a semaphore value.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * poke semaphore
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Semaphore latency: B - A
+ */
+
+ semaphore_set(sema, -1);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4 + 12 * ARRAY_SIZE(elapsed));
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, offset, 0);
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+ cs = emit_store_dw(cs, offset, 0);
+ }
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ if (wait_for(READ_ONCE(*sema) == 0, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ preempt_disable();
+ cycles = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ semaphore_set(sema, i);
+ preempt_enable();
+
+ if (wait_for(READ_ONCE(*sema) == 0, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
+ elapsed[i - 1] = sema[i] - cycles;
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: semaphore response %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_idle_dispatch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for us to submit a request while the
+ * engine is idle, but is resting in our context.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Submission latency: B - A
+ */
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ return err;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
+ preempt_disable();
+ local_bh_disable();
+ elapsed[i] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ local_bh_enable();
+ preempt_enable();
+ }
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++)
+ elapsed[i] = sema[i] - elapsed[i];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: idle dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_busy_dispatch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT + 1], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for us to submit a request while the
+ * engine is busy, polling on a semaphore in our context. With
+ * direct submission, this will include the cost of a lite restore.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Submission latency: B - A
+ */
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, offset + i * sizeof(u32), -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
+ if (i > 1 && wait_for(READ_ONCE(sema[i - 1]), 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ preempt_disable();
+ local_bh_disable();
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ local_bh_enable();
+ semaphore_set(sema, i - 1);
+ preempt_enable();
+ }
+
+ wait_for(READ_ONCE(sema[i - 1]), 500);
+ semaphore_set(sema, i - 1);
+
+ for (i = 1; i <= TF_COUNT; i++) {
+ GEM_BUG_ON(sema[i] == -1);
+ elapsed[i - 1] = sema[i] - elapsed[i];
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: busy dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int plug(struct intel_engine_cs *engine, u32 *sema, u32 mode, int value)
+{
+ const u32 offset =
+ i915_ggtt_offset(engine->status_page.vma) +
+ offset_in_page(sema);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ cs = emit_semaphore_poll(cs, mode, value, offset);
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static int measure_inter_request(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT + 1], cycles;
+ struct i915_sw_fence *submit;
+ int i, err;
+
+ /*
+ * Measure how long it takes to advance from one request into the
+ * next. Between each request we flush the GPU caches to memory,
+ * update the breadcrumbs, and then invalidate those caches.
+ * We queue up all the requests to be submitted in one batch so
+ * it should be one set of contiguous measurements.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * advance request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Request latency: B - A
+ */
+
+ err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
+ if (err)
+ return err;
+
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ semaphore_set(sema, 1);
+ return -ENOMEM;
+ }
+
+ intel_engine_flush_submission(ce->engine);
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_submit;
+ }
+
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ submit,
+ GFP_KERNEL);
+ if (err < 0) {
+ i915_request_add(rq);
+ goto err_submit;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err_submit;
+ }
+
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+ }
+ local_bh_disable();
+ i915_sw_fence_commit(submit);
+ local_bh_enable();
+ intel_engine_flush_submission(ce->engine);
+ heap_fence_put(submit);
+
+ semaphore_set(sema, 1);
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[i + 1] - sema[i];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: inter-request latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err_submit:
+ i915_sw_fence_commit(submit);
+ heap_fence_put(submit);
+ semaphore_set(sema, 1);
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_context_switch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ struct i915_request *fence = NULL;
+ u32 elapsed[TF_COUNT + 1], cycles;
+ int i, j, err;
+ u32 *cs;
+
+ /*
+ * Measure how long it takes to advance from one request in one
+ * context to a request in another context. This allows us to
+ * measure how long the context save/restore take, along with all
+ * the inter-context setup we require.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * switch context
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Context switch latency: B - A
+ */
+
+ err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
+ if (err)
+ return err;
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct intel_context *arr[] = {
+ ce, ce->engine->kernel_context
+ };
+ u32 addr = offset + ARRAY_SIZE(arr) * i * sizeof(u32);
+
+ for (j = 0; j < ARRAY_SIZE(arr); j++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(arr[j]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_fence;
+ }
+
+ if (fence) {
+ err = i915_request_await_dma_fence(rq,
+ &fence->fence);
+ if (err) {
+ i915_request_add(rq);
+ goto err_fence;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err_fence;
+ }
+
+ cs = emit_timestamp_store(cs, ce, addr);
+ addr += sizeof(u32);
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_put(fence);
+ fence = i915_request_get(rq);
+
+ i915_request_add(rq);
+ }
+ }
+ i915_request_put(fence);
+ intel_engine_flush_submission(ce->engine);
+
+ semaphore_set(sema, 1);
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 2] - sema[2 * i + 1];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: context switch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err_fence:
+ i915_request_put(fence);
+ semaphore_set(sema, 1);
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_preemption(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * We measure two latencies while triggering preemption. The first
+ * latency is how long it takes for us to submit a preempting request.
+ * The second latency is how long it takes for us to return from the
+ * preemption back to the original context.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit preemption
+ * B: read CS_TIMESTAMP on GPU (in preempting context)
+ * context switch
+ * C: read CS_TIMESTAMP on GPU (in original context)
+ *
+ * Preemption dispatch latency: B - A
+ * Preemption switch latency: C - B
+ */
+
+ if (!intel_engine_has_preemption(ce->engine))
+ return 0;
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ u32 addr = offset + 2 * i * sizeof(u32);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, addr, -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, addr + sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ if (wait_for(READ_ONCE(sema[2 * i]) == -1, 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ rq = i915_request_create(ce->engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_timestamp_store(cs, ce, addr);
+ cs = emit_store_dw(cs, offset, i);
+
+ intel_ring_advance(rq, cs);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ }
+
+ if (wait_for(READ_ONCE(sema[2 * i - 2]) != -1, 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 0] - elapsed[i - 1];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: preemption dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 1] - sema[2 * i + 0];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: preemption switch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+struct signal_cb {
+ struct dma_fence_cb base;
+ bool seen;
+};
+
+static void signal_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct signal_cb *s = container_of(cb, typeof(*s), base);
+
+ smp_store_mb(s->seen, true); /* be safe, be strong */
+}
+
+static int measure_completion(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for the signal (interrupt) to be
+ * sent from the GPU and processed by the CPU.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * signal
+ * B: read CS_TIMESTAMP from CPU
+ *
+ * Completion latency: B - A
+ */
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct signal_cb cb = { .seen = false };
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, offset + i * sizeof(u32), -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
+ dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
+
+ local_bh_disable();
+ i915_request_add(rq);
+ local_bh_enable();
+
+ if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
+ preempt_disable();
+ semaphore_set(sema, i);
+ while (!READ_ONCE(cb.seen))
+ cpu_relax();
+
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ preempt_enable();
+ }
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
+ GEM_BUG_ON(sema[i + 1] == -1);
+ elapsed[i] = elapsed[i] - sema[i + 1];
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: completion latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static void rps_pin(struct intel_gt *gt)
+{
+ /* Pin the frequency to max */
+ atomic_inc(&gt->rps.num_waiters);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ mutex_lock(&gt->rps.lock);
+ intel_rps_set(&gt->rps, gt->rps.max_freq);
+ mutex_unlock(&gt->rps.lock);
+}
+
+static void rps_unpin(struct intel_gt *gt)
+{
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ atomic_dec(&gt->rps.num_waiters);
+}
+
+static int perf_request_latency(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ int err = 0;
+
+ if (INTEL_GEN(i915) < 8) /* per-engine CS timestamp, semaphores */
+ return 0;
+
+ cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ st_engine_heartbeat_disable(engine);
+ rps_pin(engine->gt);
+
+ if (err == 0)
+ err = measure_semaphore_response(ce);
+ if (err == 0)
+ err = measure_idle_dispatch(ce);
+ if (err == 0)
+ err = measure_busy_dispatch(ce);
+ if (err == 0)
+ err = measure_inter_request(ce);
+ if (err == 0)
+ err = measure_context_switch(ce);
+ if (err == 0)
+ err = measure_preemption(ce);
+ if (err == 0)
+ err = measure_completion(ce);
+
+ rps_unpin(engine->gt);
+ st_engine_heartbeat_enable(engine);
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ if (err)
+ goto out;
+ }
+
+out:
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ cpu_latency_qos_remove_request(&qos);
+ return err;
+}
+
static int s_sync0(void *arg)
{
struct perf_series *ps = arg;
@@ -1685,9 +2492,11 @@ static int perf_series_engines(void *arg)
intel_engine_pm_get(p->engine);
if (intel_engine_supports_stats(p->engine))
- p->busy = intel_engine_get_busy_time(p->engine) + 1;
+ p->busy = intel_engine_get_busy_time(p->engine,
+ &p->time) + 1;
+ else
+ p->time = ktime_get();
p->runtime = -intel_context_get_total_runtime_ns(ce);
- p->time = ktime_get();
}
err = (*fn)(ps);
@@ -1698,13 +2507,15 @@ static int perf_series_engines(void *arg)
struct perf_stats *p = &stats[idx];
struct intel_context *ce = ps->ce[idx];
int integer, decimal;
- u64 busy, dt;
+ u64 busy, dt, now;
- p->time = ktime_sub(ktime_get(), p->time);
- if (p->busy) {
- p->busy = ktime_sub(intel_engine_get_busy_time(p->engine),
+ if (p->busy)
+ p->busy = ktime_sub(intel_engine_get_busy_time(p->engine,
+ &now),
p->busy - 1);
- }
+ else
+ now = ktime_get();
+ p->time = ktime_sub(now, p->time);
err = switch_to_kernel_sync(ce, err);
p->runtime += intel_context_get_total_runtime_ns(ce);
@@ -1764,13 +2575,14 @@ static int p_sync0(void *arg)
return err;
}
- busy = false;
if (intel_engine_supports_stats(engine)) {
- p->busy = intel_engine_get_busy_time(engine);
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
}
- p->time = ktime_get();
count = 0;
do {
struct i915_request *rq;
@@ -1793,11 +2605,15 @@ static int p_sync0(void *arg)
count++;
} while (!__igt_timeout(end_time, NULL));
- p->time = ktime_sub(ktime_get(), p->time);
if (busy) {
- p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
}
err = switch_to_kernel_sync(ce, err);
@@ -1830,13 +2646,14 @@ static int p_sync1(void *arg)
return err;
}
- busy = false;
if (intel_engine_supports_stats(engine)) {
- p->busy = intel_engine_get_busy_time(engine);
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
}
- p->time = ktime_get();
count = 0;
do {
struct i915_request *rq;
@@ -1861,11 +2678,15 @@ static int p_sync1(void *arg)
count++;
} while (!__igt_timeout(end_time, NULL));
i915_request_put(prev);
- p->time = ktime_sub(ktime_get(), p->time);
if (busy) {
- p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
}
err = switch_to_kernel_sync(ce, err);
@@ -1897,14 +2718,15 @@ static int p_many(void *arg)
return err;
}
- busy = false;
if (intel_engine_supports_stats(engine)) {
- p->busy = intel_engine_get_busy_time(engine);
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
}
count = 0;
- p->time = ktime_get();
do {
struct i915_request *rq;
@@ -1917,11 +2739,15 @@ static int p_many(void *arg)
i915_request_add(rq);
count++;
} while (!__igt_timeout(end_time, NULL));
- p->time = ktime_sub(ktime_get(), p->time);
if (busy) {
- p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
}
err = switch_to_kernel_sync(ce, err);
@@ -2042,6 +2868,7 @@ static int perf_parallel_engines(void *arg)
int i915_request_perf_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(perf_request_latency),
SUBTEST(perf_series_engines),
SUBTEST(perf_parallel_engines),
};
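All of the new perf_request_latency subtests above reduce their five raw CS_TIMESTAMP deltas with trifilter(): sort the samples, keep only the middle three with the median weighted twice (weights 1-2-1, hence TF_BIAS = 2), and remove the bias again when reporting. A self-contained sketch of that reduction follows; for clarity it folds the bias shift into the return value, whereas the patch keeps the scaled sum and shifts at print time:

        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define TF_COUNT 5
        #define TF_BIAS  2      /* sum of weights 1+2+1 == 1 << TF_BIAS */

        static int cmp_u32(const void *a, const void *b)
        {
                uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;

                return (x > y) - (x < y);
        }

        static uint32_t trifilter(uint32_t *a)
        {
                uint64_t sum;

                qsort(a, TF_COUNT, sizeof(*a), cmp_u32);

                /* Middle three samples, median counted twice. */
                sum = 2ull * a[2] + a[1] + a[3];

                /* Shift the weight sum back out: a weighted average. */
                return (uint32_t)(sum >> TF_BIAS);
        }

        int main(void)
        {
                uint32_t samples[TF_COUNT] = { 110, 90, 5000, 100, 95 };

                /* The outlier (5000) is discarded; the result stays near the median. */
                printf("filtered = %u cycles\n", trifilter(samples));
                return 0;
        }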
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e35ba5f9e73f..ec0ecb4e4ca6 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -134,15 +134,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- if (INTEL_GEN(rq->i915) >= 8) {
+ if (INTEL_GEN(rq->engine->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
- } else if (INTEL_GEN(rq->i915) >= 6) {
+ } else if (INTEL_GEN(rq->engine->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
- } else if (INTEL_GEN(rq->i915) >= 4) {
+ } else if (INTEL_GEN(rq->engine->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
@@ -154,11 +154,11 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = arbitration_command;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(rq->engine->i915) >= 8)
*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
- else if (IS_HASWELL(rq->i915))
+ else if (IS_HASWELL(rq->engine->i915))
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
- else if (INTEL_GEN(rq->i915) >= 6)
+ else if (INTEL_GEN(rq->engine->i915) >= 6)
*batch++ = MI_BATCH_BUFFER_START;
else
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
@@ -176,7 +176,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
}
flags = 0;
- if (INTEL_GEN(rq->i915) <= 5)
+ if (INTEL_GEN(rq->engine->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
@@ -221,8 +221,8 @@ bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
- 10) &&
+ 100) &&
wait_for(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
- 1000));
+ 50));
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9b105b811f1f..b9810bf156c3 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -24,6 +24,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/iommu.h>
#include <drm/drm_managed.h>
@@ -118,6 +119,9 @@ struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
struct pci_dev *pdev;
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+ struct dev_iommu iommu;
+#endif
int err;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@@ -136,8 +140,10 @@ struct drm_i915_private *mock_gem_device(void)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
- /* hack to disable iommu for the fake device; force identity mapping */
- pdev->dev.archdata.iommu = (void *)-1;
+ /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
+ memset(&iommu, 0, sizeof(iommu));
+ iommu.priv = (void *)-1;
+ pdev->dev.iommu = &iommu;
#endif
pci_set_drvdata(pdev, i915);
@@ -190,7 +196,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915, &i915->ggtt);
i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
- mkwrite_device_info(i915)->engine_mask = BIT(0);
+ mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
+ i915->gt.info.engine_mask = BIT(0);
i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
if (!i915->gt.engine[RCS0])
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index edc5e3dda8ca..b173086411ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -38,7 +38,8 @@ static void mock_insert_entries(struct i915_address_space *vm,
{
}
-static int mock_bind_ppgtt(struct i915_vma *vma,
+static int mock_bind_ppgtt(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -47,7 +48,8 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
return 0;
}
-static void mock_unbind_ppgtt(struct i915_vma *vma)
+static void mock_unbind_ppgtt(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
}
@@ -88,7 +90,8 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
return ppgtt;
}
-static int mock_bind_ggtt(struct i915_vma *vma,
+static int mock_bind_ggtt(struct i915_address_space *vm,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
@@ -96,7 +99,8 @@ static int mock_bind_ggtt(struct i915_vma *vma,
return 0;
}
-static void mock_unbind_ggtt(struct i915_vma *vma)
+static void mock_unbind_ggtt(struct i915_address_space *vm,
+ struct i915_vma *vma)
{
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index b2ad41c27e67..09660f5a0a4c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -9,6 +9,7 @@
#include "mock_region.h"
static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+ .name = "mock-region",
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,